| code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # suppress TensorFlow's C++ startup logs
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 519
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 490
| 0
|
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator so that both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 532
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 532
| 1
|
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 560
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
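
A quick, illustrative sanity check of `dijkstra`; the grid, the coordinates, and the 1 = traversable / 0 = blocked convention are assumptions inferred from the `next_node == 1` check, not part of the original module:

import numpy as np

grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
dist, path = dijkstra(grid, source=(0, 0), destination=(2, 0), allow_diagonal=False)
print(dist)  # 6.0 -- the path detours around the blocked middle row
print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]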
| 560
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
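
A small usage sketch of the composed config, using only the defaults visible in the signatures above:

config = BridgeTowerConfig()  # sub-configs are filled with BridgeTowerText/VisionConfig defaults
assert config.text_config.vocab_size == 50265
assert config.vision_config.image_size == 288
serialized = config.to_dict()  # nested dicts plus the top-level "bridgetower" model_type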
| 707
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
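
Both init files above follow the same pattern: at import time only `_import_structure` is built, and `_LazyModule` resolves names on first attribute access. A sketch of the observable behavior (assuming a transformers install that ships this module):

import transformers

config_cls = transformers.RoFormerConfig  # cheap: the config module has no heavy deps
model_cls = transformers.RoFormerModel    # this attribute access imports modeling_roformer lazily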
| 491
| 0
|
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """
    Uses exchange sort to sort a list of numbers.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE_ = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 517
|
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """Generates a random hand/other/expected triple drawn from SORTED_HANDS."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generates number_of_hands random hands."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to _is_five_high_straight() must not mutate the hand's card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler: compare 1000 hand pairs from poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 552
| 0
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # note: `worflow_run_id` (sic) matches the keyword defined by `get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
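
A hypothetical invocation of the helpers above; the artifact name and output directory are illustrative, not part of the script:

reports = get_last_daily_ci_reports(
    artifact_names=["prev_ci_results"],  # assumed artifact name; depends on the CI upload step
    output_dir="previous_ci",
    token=os.environ.get("GITHUB_TOKEN"),
)
for artifact_name, files in reports.items():
    print(artifact_name, sorted(files))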
| 709
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 637
| 0
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares.

    >>> solution(10)
    2640
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(worflow_run_id, token=None):  # (sic: "worflow" is the name callers use)
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip to `output_dir`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error and keep the failed tests that produced it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path like `tests/models/albert/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Group the error counts by model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
__UpperCamelCase : Any = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__UpperCamelCase : Optional[Any] = get_job_links(args.workflow_run_id, token=args.token)
__UpperCamelCase : str = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__UpperCamelCase : List[str] = k.find(" / ")
__UpperCamelCase : List[Any] = k[index + len(" / ") :]
__UpperCamelCase : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__UpperCamelCase : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__UpperCamelCase : int = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__UpperCamelCase : Optional[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__UpperCamelCase : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__UpperCamelCase : Union[str, Any] = reduce_by_error(errors)
__UpperCamelCase : Dict = reduce_by_model(errors)
__UpperCamelCase : Union[str, Any] = make_github_table(reduced_by_error)
__UpperCamelCase : Union[str, Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 468
| 0
|
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS that returns the size of the subtree rooted at `start`, recording even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """
    2 1
    3 1
    4 3
    5 2
    6 1
    7 2
    8 6
    9 8
    10 8
    On removing edges (1,3) and (1,6), we can get the desired result 2.
    """
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 392
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 392
| 1
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 520
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    '''simple docstring'''
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    def get_last_lr(self):
        return self.scheduler.get_last_lr()
    def state_dict(self):
        return self.scheduler.state_dict()
    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)
    def get_lr(self):
        return self.scheduler.get_lr()
    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
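# A minimal usage sketch (the `optimizer` and `lr_scheduler` names below are
# hypothetical, not part of this file; in practice `Accelerator.prepare` builds
# this wrapper for you):
#
#     lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizers=optimizer)
#     scheduler.step()  # only advances when the wrapped optimizer really stepped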
| 520
| 1
|
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
    print(f"{solution() = }")
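# Hand-checked example of the recurrence above: a row of length 3 admits the
# empty filling, a length-2 tile in either of two positions, and one length-3
# tile, so solution(3) == 4.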
| 193
|
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
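# Both solvers run in O(n * w) time; the memory-function variant only fills the
# (i, j) cells it actually visits. Worked check against the values above:
# capacity 6 fits items 3 and 4 (weights 2 + 3, values 4 + 4), so the optimum
# is 8, matching the asserts.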
| 193
| 1
|
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''')
    return 0.5 * mass * abs(velocity) * abs(velocity)
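# Worked example: kinetic_energy(10, 10) == 0.5 * 10 * 10 * 10 == 500.0; the
# abs() calls make the result independent of the velocity's sign.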
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 127
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 428
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__magic_name__ : Tuple = None
__magic_name__ : Optional[Any] = logging.get_logger(__name__)
__magic_name__ : Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__magic_name__ : Optional[int] = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
__magic_name__ : Any = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
__magic_name__ : Optional[int] = """▁"""
# Segments (not really needed)
__magic_name__ : List[str] = 0
__magic_name__ : List[Any] = 1
__magic_name__ : Dict = 2
__magic_name__ : Union[str, Any] = 3
__magic_name__ : Dict = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 700
|
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
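# Quick sanity check of the in-place behaviour: gnome_sort([3, 1, 2]) walks the
# list with a single index, swapping backwards as needed, and returns [1, 2, 3].
# Worst case is O(n^2) comparisons, like insertion sort.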
| 608
| 0
|
from __future__ import annotations
def find_max(nums, left, right) -> int | float:
    """simple docstring"""
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
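# Divide-and-conquer example: find_max([2, 8, 1, 3], 0, 3) splits at mid=1,
# compares max(2, 8) against max(1, 3), and returns 8; recursion depth is
# O(log n) with O(n) total comparisons.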
| 1
|
from typing import Any
class Node:
    def __init__(self, data: Any):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__(self):
        '''simple docstring'''
        return f'''Node({self.data})'''
class LinkedList:
    def __init__(self):
        '''simple docstring'''
        self.head = None
    def __iter__(self):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self):
        '''simple docstring'''
        return sum(1 for _ in self)
    def __repr__(self):
        '''simple docstring'''
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int):
        '''simple docstring'''
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any):
        '''simple docstring'''
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any):
        '''simple docstring'''
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any):
        '''simple docstring'''
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any):
        '''simple docstring'''
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        '''simple docstring'''
        print(self)
    def delete_head(self):
        '''simple docstring'''
        return self.delete_nth(0)
    def delete_tail(self):  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0):
        '''simple docstring'''
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        '''simple docstring'''
        return self.head is None
    def reverse(self):
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """simple docstring"""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'''Element at Position 1: {linked_list[1]}''')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'''length of linked_list is : {len(linked_list)}''')
if __name__ == "__main__":
    main()
| 1
| 1
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {'vocab_file': 'vocab.json'}
_A = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_A = {'mgp-str': 2_7}
class MgpstrTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
| 438
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'mra'
    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 438
| 1
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """simple docstring"""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    '''simple docstring'''
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler], ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, image=None, batch_size=1, num_inference_steps=100, eta=0.0, generator=None, output_type="pil", return_dict=True, ):
        '''simple docstring'''
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
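# Hedged usage sketch (the checkpoint id below is illustrative, taken from the
# diffusers docs, and `low_res_pil_image` is a hypothetical PIL input):
#
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]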
| 28
|
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12_345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'{solution() = }')
| 159
| 0
|
"""simple docstring"""
def sylvester(number: int) -> int:
    """simple docstring"""
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
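# The recurrence a(n) = a(n-1)^2 - a(n-1) + 1 yields 2, 3, 7, 43, 1807, ... so
# sylvester(3) == 7 and sylvester(4) == 43; each term is coprime to all earlier ones.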
| 494
|
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """simple docstring"""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
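# Worked pass on [3, 1, 2]: the even phase swaps indices 0/1 giving [1, 3, 2],
# the odd phase swaps indices 1/2 giving [1, 2, 3], and one clean sweep ends the
# loop. This is brick sort: O(n^2) sequentially, but the phases parallelize well.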
| 494
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
| 1
|
"""simple docstring"""
import logging
from transformers import PretrainedConfig
a_ = logging.getLogger(__name__)
a_ = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """bertabs"""
    def __init__(self, vocab_size=3_0522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs, ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 349
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    '''simple docstring'''
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"{solution() = }")
| 349
| 1
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act='gelu', num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, )
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='SwiftFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason='SwiftFormer does not output attentions')
    def test_attention_outputs(self):
        '''simple docstring'''
        pass
    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape, torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]), )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        '''simple docstring'''
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        '''simple docstring'''
        pass
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 242
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    '''simple docstring'''
    def __init__(self, model):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    def forward(self):
        '''simple docstring'''
        pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str) -> None:
    '''simple docstring'''
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("""cpu"""))
    lightning_model.load_state_dict(ckpt["""state_dict"""])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 36
| 0
|
import cv2
import numpy as np
class HarrisCorner:
    """simple docstring"""
    def __init__(self, k: float, window_size: int):
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''')
    def __str__(self) -> str:
        """simple docstring"""
        return str(self.k)
    def detect(self, img_path: str):
        """simple docstring"""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 717
|
| 362
| 0
|
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
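# Worked example: for n = 10 the square of the sum is 55^2 = 3025 and the sum
# of the squares is 385, so solution(10) == 2640.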
| 87
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ['LayoutLMv2FeatureExtractor']
    _import_structure["image_processing_layoutlmv2"] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 575
| 0
|
def sum_digits(num: int) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 264
|
import os
import time
import numpy as np
import onnxruntime as ort
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
sess = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 264
| 1
|
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
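A minimal usage sketch of the helpers above; the pinned packages are illustrative examples, not actual requirements of this module:

require_version("numpy>=1.17,<2.0")  # range check against the installed distribution
require_version("python>=3.7")  # special-cased check against the running interpreter
require_version_core("tokenizers>=0.11.1")  # same check, with the core install hint appended on failure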
| 689
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 124
| 0
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 314
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # load GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
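For reference, a typical invocation of the converter above; the checkpoint path is a placeholder, not a file shipped with this script:

# python convert_glpn_to_pytorch.py --checkpoint_path /path/to/glpn_kitti.pth \
#     --model_name glpn-kitti --pytorch_dump_folder_path ./glpn-kitti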
| 314
| 1
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 354
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 354
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the DeepFloyd IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 228
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
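A quick illustrative check of patience_sort, e.g. from a REPL session; the inputs are arbitrary examples:

assert patience_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert patience_sort([]) == []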
| 228
| 1
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
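Two illustrative values of get_distance: the origin never diverges, while a point far outside the set diverges on the very first step:

assert get_distance(0, 0, 50) == 1.0  # bounded orbit -> distance 1
assert get_distance(2, 2, 50) == 0.0  # immediate divergence -> distance 0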
| 292
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data (min-max scaling)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data (z-score)
    return [round((x - mu) / (sigma), ndigits) for x in data]
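Illustrative examples on arbitrary data: min-max scaling maps the extremes to 0 and 1, while z-score standardization centers the sample mean at 0:

assert normalization([1, 2, 3]) == [0.0, 0.5, 1.0]
assert standardization([1, 2, 3]) == [-1.0, 0.0, 1.0]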
| 292
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        expected_words = SCREAMING_SNAKE_CASE_  # capture the word list before the boxes literal below rebinds the placeholder name
SCREAMING_SNAKE_CASE_ = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        # fmt: on
        expected_boxes = SCREAMING_SNAKE_CASE_

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 720
|
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
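For the adjacency list above with source "G", the breadth-first tree yields the following results (traced by hand, shown here as comments only):

# g.shortest_path("D")   -> "G->C->A->B->D"
# g.shortest_path("G")   -> "G"
# g.shortest_path("Foo") -> raises ValueError, since "Foo" has no parent in the tree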
| 89
| 0
|
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # the difference cancels to zero, so the result is always a tensor of ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
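A hedged usage sketch for the one-step pipeline above; the tiny UNet2DModel configuration is an illustrative assumption, not a recommended model:

from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=8,
    in_channels=3,
    out_channels=3,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipeline = CustomLocalPipeline(unet=unet, scheduler=DDPMScheduler())
result = pipeline()  # by construction, a tensor of ones shaped like the sample
print(result.shape)  # torch.Size([1, 3, 8, 8])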
| 415
|
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
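Illustrative only: the 2-bit reflected Gray code produced by the functions above:

assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code(2) == [0, 1, 3, 2]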
| 415
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 391
|
import re
import string
import numpy as np
import datasets
__magic_name__ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__magic_name__ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
__magic_name__ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 391
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 35
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowercase__ :Tuple = TypeVar('T')
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
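# Usage sketch for the stack above; the names (Node, Stack, push, pop, peek,
# clear) follow the repaired definitions rather than the obfuscated originals.
def _demo_stack() -> None:
    stack: Stack[int] = Stack()
    assert stack.is_empty()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1"  # most recently pushed item prints first
    assert stack.peek() == 3
    assert stack.pop() == 3
    assert len(stack) == 2
    stack.clear()
    assert stack.is_empty()


if __name__ == "__main__":
    _demo_stack()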
| 522
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def lowercase ( self ) -> Tuple:
"""simple docstring"""
return 32
@property
def lowercase ( self ) -> str:
"""simple docstring"""
return 32
@property
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
return 8
@property
def lowercase ( self ) -> Tuple:
"""simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
def lowercase ( self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
@property
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ : Union[str, Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__magic_name__ : Optional[Any] = PriorTransformer(**lowerCamelCase )
return model
@property
def lowercase ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ : Optional[int] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__magic_name__ : Dict = ShapERenderer(**lowerCamelCase )
return model
def lowercase ( self ) -> int:
"""simple docstring"""
__magic_name__ : Any = self.dummy_prior
__magic_name__ : List[str] = self.dummy_text_encoder
__magic_name__ : Optional[Any] = self.dummy_tokenizer
__magic_name__ : Dict = self.dummy_renderer
__magic_name__ : List[Any] = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=lowerCamelCase , clip_sample=lowerCamelCase , clip_sample_range=1.0 , )
__magic_name__ : Any = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase ( self , lowerCamelCase , lowerCamelCase=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(lowerCamelCase ).startswith('''mps''' ):
__magic_name__ : Optional[int] = torch.manual_seed(lowerCamelCase )
else:
__magic_name__ : Dict = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__magic_name__ : Union[str, Any] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array([0.00039216] * 9)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def lowercase ( self ) -> Tuple:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
__magic_name__ : Tuple = torch_device == '''cpu'''
__magic_name__ : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase , relax_max_difference=lowerCamelCase , )
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
__magic_name__ : List[str] = self.get_dummy_components()
__magic_name__ : Optional[int] = self.pipeline_class(**lowerCamelCase )
__magic_name__ : int = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__magic_name__ : str = 1
__magic_name__ : str = 2
__magic_name__ : Optional[int] = self.get_dummy_inputs(lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
__magic_name__ : Optional[int] = batch_size * [inputs[key]]
__magic_name__ : Any = pipe(**lowerCamelCase , num_images_per_prompt=lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ) -> str:
"""simple docstring"""
__magic_name__ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
__magic_name__ : str = ShapEPipeline.from_pretrained('''openai/shap-e''' )
__magic_name__ : Optional[int] = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__magic_name__ : Tuple = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__magic_name__ : Dict = pipe(
'''a shark''' , generator=lowerCamelCase , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
| 336
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV2VEC2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"
def __init__( self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0_2 , lowerCamelCase=1e-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=0.0_5 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase=320 , lowerCamelCase=2 , lowerCamelCase=0.1 , lowerCamelCase=100 , lowerCamelCase=256 , lowerCamelCase=256 , lowerCamelCase=0.1 , lowerCamelCase="sum" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=(512, 512, 512, 512, 1500) , lowerCamelCase=(5, 3, 3, 1, 1) , lowerCamelCase=(1, 2, 3, 1, 1) , lowerCamelCase=512 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=False , lowerCamelCase=3 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase )
__magic_name__ : int = hidden_size
__magic_name__ : Optional[int] = feat_extract_norm
__magic_name__ : Union[str, Any] = feat_extract_activation
__magic_name__ : Union[str, Any] = list(lowerCamelCase )
__magic_name__ : Any = list(lowerCamelCase )
__magic_name__ : int = list(lowerCamelCase )
__magic_name__ : List[str] = conv_bias
__magic_name__ : Optional[Any] = num_conv_pos_embeddings
__magic_name__ : Tuple = num_conv_pos_embedding_groups
__magic_name__ : Optional[Any] = len(self.conv_dim )
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : Optional[Any] = intermediate_size
__magic_name__ : int = hidden_act
__magic_name__ : Optional[int] = num_attention_heads
__magic_name__ : Tuple = hidden_dropout
__magic_name__ : Any = attention_dropout
__magic_name__ : Tuple = activation_dropout
__magic_name__ : int = feat_proj_dropout
__magic_name__ : List[str] = final_dropout
__magic_name__ : Tuple = layerdrop
__magic_name__ : str = layer_norm_eps
__magic_name__ : Optional[int] = initializer_range
__magic_name__ : Dict = vocab_size
__magic_name__ : Optional[Any] = do_stable_layer_norm
__magic_name__ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__magic_name__ : str = apply_spec_augment
__magic_name__ : List[str] = mask_time_prob
__magic_name__ : Optional[int] = mask_time_length
__magic_name__ : int = mask_time_min_masks
__magic_name__ : Optional[Any] = mask_feature_prob
__magic_name__ : List[str] = mask_feature_length
__magic_name__ : List[str] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__magic_name__ : int = num_codevectors_per_group
__magic_name__ : Dict = num_codevector_groups
__magic_name__ : str = contrastive_logits_temperature
__magic_name__ : List[str] = feat_quantizer_dropout
__magic_name__ : Union[str, Any] = num_negatives
__magic_name__ : Tuple = codevector_dim
__magic_name__ : List[str] = proj_codevector_dim
__magic_name__ : Any = diversity_loss_weight
# ctc loss
__magic_name__ : Tuple = ctc_loss_reduction
__magic_name__ : Dict = ctc_zero_infinity
# adapter
__magic_name__ : str = add_adapter
__magic_name__ : List[str] = adapter_kernel_size
__magic_name__ : str = adapter_stride
__magic_name__ : Dict = num_adapter_layers
__magic_name__ : str = output_hidden_size or hidden_size
__magic_name__ : Dict = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__magic_name__ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__magic_name__ : Union[str, Any] = list(lowerCamelCase )
__magic_name__ : List[Any] = list(lowerCamelCase )
__magic_name__ : Optional[Any] = list(lowerCamelCase )
__magic_name__ : List[Any] = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1)
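# For intuition: with the default conv_stride the property above multiplies
# 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320, i.e. one encoder output frame per 320 input
# samples (20 ms of audio at 16 kHz). Standalone check of the same arithmetic:
if __name__ == "__main__":
    default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
    ratio = functools.reduce(operator.mul, default_conv_stride, 1)
    assert ratio == 320
    assert ratio / 16_000 == 0.02  # seconds of audio per output frame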
| 336
| 1
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint and rebuild the original WavLM model
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
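# Typical invocation (all paths are placeholders, not shipped files; the
# script file name is illustrative):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-hf
#
# Passing --config_path is optional; without it the default WavLMConfig is used.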
| 38
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mega'''] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 426
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """nllb-moe"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , snake_case=12_8112 , snake_case=1024 , snake_case=12 , snake_case=4096 , snake_case=16 , snake_case=12 , snake_case=4096 , snake_case=16 , snake_case=0.05 , snake_case=0.05 , snake_case=True , snake_case=True , snake_case="relu" , snake_case=1024 , snake_case=0.1 , snake_case=0.1 , snake_case=0.0 , snake_case=0.02 , snake_case=2 , snake_case=True , snake_case=False , snake_case="float32" , snake_case=False , snake_case=128 , snake_case=64 , snake_case=4 , snake_case=4 , snake_case=0.001 , snake_case=0.001 , snake_case="all" , snake_case=False , snake_case=False , snake_case=1.0 , snake_case=0.2 , snake_case=1 , snake_case=0 , snake_case=2 , snake_case=False , **snake_case , ):
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = use_cache
lowercase = encoder_layers
lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase = router_z_loss_coef
lowercase = router_aux_loss_coef
lowercase = decoder_sparse_step
lowercase = encoder_sparse_step
lowercase = num_experts
lowercase = expert_capacity
lowercase = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowercase = router_dtype
lowercase = router_ignore_padding_tokens
lowercase = batch_prioritized_routing
lowercase = second_expert_policy
lowercase = normalize_router_prob_before_dropping
lowercase = moe_eval_capacity_token_fraction
lowercase = moe_token_dropout
lowercase = output_router_logits
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , **snake_case , )
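# Hedged usage sketch: the constructor above mirrors the upstream
# `transformers.NllbMoeConfig`; the sketch exercises that upstream class
# (assumed installed, imported under an alias to avoid shadowing the local
# definition). The values chosen are purely illustrative.
if __name__ == "__main__":
    from transformers import NllbMoeConfig as UpstreamNllbMoeConfig

    config = UpstreamNllbMoeConfig(num_experts=8, expert_capacity=32, router_dtype="bfloat16")
    assert config.model_type == "nllb-moe"
    assert config.num_experts == 8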
| 565
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny'):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small'):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small'):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base'):
            pass
        elif vit_name[4:].startswith('large'):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge'):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 565
| 1
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve Z**2 = R**2 + X**2 for whichever quantity is given as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
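# Worked example: a 3-4-5 right triangle of resistance, reactance, impedance.
if __name__ == "__main__":
    assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
    assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}
    assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}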
| 16
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Heun's method (explicit trapezoidal rule): predict with an Euler step,
    then correct with the averaged slope."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )  # trapezoidal corrector
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
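# Usage sketch: solve y' = y with y(0) = 1 on [0, 1]; Heun's method is
# second-order, so the endpoint should be close to e = 2.71828...
if __name__ == "__main__":
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    assert abs(y[-1] - np.e) < 1e-3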
| 16
| 1
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
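# Usage sketch: 1s are walkable cells, 0s are walls; each step costs 1.
if __name__ == "__main__":
    demo_grid = np.array(
        [
            [1, 1, 1],
            [0, 0, 1],
            [1, 1, 1],
        ]
    )
    distance, path = dijkstra(demo_grid, (0, 0), (2, 0), allow_diagonal=False)
    assert distance == 6  # along the top row, down the right column, back left
    assert path[0] == (0, 0) and path[-1] == (2, 0)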
| 529
|
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = 'autoformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
def __init__( self : Dict , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : str = "student_t" , lowerCAmelCase : str = "nll" , lowerCAmelCase : int = 1 , lowerCAmelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase : bool = True , lowerCAmelCase : int = 0 , lowerCAmelCase : int = 0 , lowerCAmelCase : int = 0 , lowerCAmelCase : int = 0 , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : int = 64 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 32 , lowerCAmelCase : int = 32 , lowerCAmelCase : str = "gelu" , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : int = 100 , lowerCAmelCase : float = 0.02 , lowerCAmelCase : bool = True , lowerCAmelCase : Tuple=True , lowerCAmelCase : int = 10 , lowerCAmelCase : int = 25 , lowerCAmelCase : int = 3 , **lowerCAmelCase : Tuple , ):
# time series specific configuration
lowerCAmelCase = prediction_length
lowerCAmelCase = context_length if context_length is not None else prediction_length
lowerCAmelCase = distribution_output
lowerCAmelCase = loss
lowerCAmelCase = input_size
lowerCAmelCase = num_time_features
lowerCAmelCase = lags_sequence
lowerCAmelCase = scaling
lowerCAmelCase = num_dynamic_real_features
lowerCAmelCase = num_static_real_features
lowerCAmelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCAmelCase = cardinality
else:
lowerCAmelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCAmelCase = embedding_dimension
else:
lowerCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase = d_model
lowerCAmelCase = encoder_attention_heads
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = encoder_ffn_dim
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = encoder_layers
lowerCAmelCase = decoder_layers
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = use_cache
# Autoformer
lowerCAmelCase = label_length
lowerCAmelCase = moving_average
lowerCAmelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase )
@property
    def _number_of_features(self):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 529
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (the unknown passed as 0),
    compute the third via P = V * I."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
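# Worked examples, one per branch (rounding matches the function above):
if __name__ == "__main__":
    assert electric_power(voltage=0, current=2, power=5) == ("voltage", 2.5)
    assert electric_power(voltage=2, current=0, power=4) == ("current", 2.0)
    assert electric_power(voltage=2, current=3, power=0) == ("power", 6.0)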
| 577
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
return unet
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
return unet
def _lowerCAmelCase ( self , lowerCAmelCase_=False ):
'''simple docstring'''
if class_cond:
a_ : List[str] = self.dummy_cond_unet
else:
a_ : Dict = self.dummy_uncond_unet
# Default to CM multistep sampler
a_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ):
'''simple docstring'''
if str(lowerCAmelCase_ ).startswith("""mps""" ):
a_ : int = torch.manual_seed(lowerCAmelCase_ )
else:
a_ : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
a_ : Union[str, Any] = {
"""batch_size""": 1,
"""num_inference_steps""": None,
"""timesteps""": [22, 0],
"""generator""": generator,
"""output_type""": """np""",
}
return inputs
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Optional[Any] = self.get_dummy_components()
a_ : Any = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : Tuple = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : List[Any] = image[0, -3:, -3:, -1]
a_ : int = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = self.get_dummy_components(class_cond=lowerCAmelCase_ )
a_ : Tuple = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : str = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Dict = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : str = 0
a_ : Union[str, Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : int = image[0, -3:, -3:, -1]
a_ : str = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : List[Any] = self.get_dummy_components()
a_ : Optional[int] = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : List[str] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Any = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : List[Any] = 1
a_ : int = None
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : List[Any] = image[0, -3:, -3:, -1]
a_ : List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : str = self.get_dummy_components(class_cond=lowerCAmelCase_ )
a_ : Optional[Any] = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : Union[str, Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : int = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : Any = 1
a_ : Optional[int] = None
a_ : Optional[Any] = 0
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : str = image[0, -3:, -3:, -1]
a_ : int = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self , lowerCAmelCase_=0 , lowerCAmelCase_=False , lowerCAmelCase_="cpu" , lowerCAmelCase_=torch.floataa , lowerCAmelCase_=(1, 3, 64, 64) ):
'''simple docstring'''
a_ : List[str] = torch.manual_seed(lowerCAmelCase_ )
a_ : int = {
"""num_inference_steps""": None,
"""timesteps""": [22, 0],
"""class_labels""": 0,
"""generator""": generator,
"""output_type""": """np""",
}
if get_fixed_latents:
a_ : int = self.get_fixed_latents(seed=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ , shape=lowerCAmelCase_ )
a_ : str = latents
return inputs
def _lowerCAmelCase ( self , lowerCAmelCase_=0 , lowerCAmelCase_="cpu" , lowerCAmelCase_=torch.floataa , lowerCAmelCase_=(1, 3, 64, 64) ):
'''simple docstring'''
if type(lowerCAmelCase_ ) == str:
a_ : Dict = torch.device(lowerCAmelCase_ )
a_ : Dict = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
a_ : Dict = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
return latents
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : List[str] = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Union[str, Any] = self.get_inputs()
a_ : List[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Dict = image[0, -3:, -3:, -1]
a_ : int = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : List[Any] = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Tuple = self.get_inputs()
a_ : Optional[Any] = 1
a_ : List[str] = None
a_ : Optional[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Any = image[0, -3:, -3:, -1]
a_ : Any = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : Tuple = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : str = self.get_inputs(get_fixed_latents=lowerCAmelCase_ , device=lowerCAmelCase_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase_ , enable_math=lowerCAmelCase_ , enable_mem_efficient=lowerCAmelCase_ ):
a_ : int = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : int = image[0, -3:, -3:, -1]
a_ : Optional[int] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : Dict = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : List[str] = self.get_inputs(get_fixed_latents=lowerCAmelCase_ , device=lowerCAmelCase_ )
a_ : str = 1
a_ : Dict = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase_ , enable_math=lowerCAmelCase_ , enable_mem_efficient=lowerCAmelCase_ ):
a_ : Optional[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Optional[int] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 577
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A : List[Any] = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__A )
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None,
        eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5,
        max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr",
        dataset_split="train", index_name="compressed", index_path=None, passages_path=None,
        use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True,
        forced_eos_token_id=None, **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
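# Hedged usage sketch: composing a RAG config from two sub-configs. The model
# ids are illustrative, and fetching them requires network access to the Hub.
if __name__ == "__main__":
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder_config, generator_config)
    assert rag_config.model_type == "rag"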
| 719
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        # Precompute the word/box pairs with Tesseract so they can be passed in directly.
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
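# Quick interactive sketch (illustrative only) of the pipeline these tests exercise; the
# checkpoint is the same public one used in the slow tests above, the file name is a placeholder.
#
#   from transformers import pipeline
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image="invoice.png", question="What is the invoice number?", top_k=1)
#   # -> [{"score": ..., "answer": ..., "start": ..., "end": ...}]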
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector for a sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the unit with the smaller squared Euclidean distance wins
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning weight vector `j` towards the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
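# A vectorized sketch of the same winner computation using NumPy (an alternative added for
# illustration; not part of the original script):
#
#   import numpy as np
#   def get_winner_np(weights: np.ndarray, sample: np.ndarray) -> int:
#       return int(np.argmin(((weights - sample) ** 2).sum(axis=1)))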
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    r"""Constructs an image processor with resize / center-crop / rescale / normalize steps."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
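# Usage sketch (illustrative; the file name is a placeholder): running the processor on a
# PIL image yields a channels-first tensor cropped to 224x224.
#
#   from PIL import Image
#   processor = MobileNetV2ImageProcessor()
#   pixel_values = processor(images=Image.open("cat.png"), return_tensors="pt")["pixel_values"]
#   pixel_values.shape  # -> torch.Size([1, 3, 224, 224])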
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether a system of forces is in static equilibrium (net moment about the origin ~ 0)."""
    # summation of moments of all the forces about the origin
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
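# Quick behavioral sketch of `polar_force` (illustrative): a 10 N force at 90 degrees resolves
# into an essentially pure y-component.
#
#   >>> polar_force(10, 90)
#   [6.123233995736766e-16, 10.0]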
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution plus max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """ResNet shortcut, used to project the residual features to the correct size."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer, composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """A classic ResNet bottleneck layer: a 1x1 reduction, a 3x3 convolution, and a 1x1 expansion."""

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
_lowerCAmelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCAmelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
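# Usage sketch (illustrative; `image` is a placeholder PIL image) for the classification head
# defined above, using the documented microsoft/resnet-50 checkpoint:
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_label = model(**inputs).logits.argmax(-1).item()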
_a = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCAmelCase__() -> None:
'''simple docstring'''
lowerCamelCase__ = input('''Enter message: ''' )
lowerCamelCase__ = input('''Enter key [alphanumeric]: ''' )
lowerCamelCase__ = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowerCamelCase__ = '''encrypt'''
lowerCamelCase__ = encrypt_message(__snake_case ,__snake_case )
elif mode.lower().startswith('''d''' ):
lowerCamelCase__ = '''decrypt'''
lowerCamelCase__ = decrypt_message(__snake_case ,__snake_case )
print(F'\n{mode.title()}ed message:' )
print(__snake_case )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> str:
'''simple docstring'''
return translate_message(__snake_case ,__snake_case ,'''encrypt''' )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> str:
'''simple docstring'''
return translate_message(__snake_case ,__snake_case ,'''decrypt''' )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = 0
lowerCamelCase__ = key.upper()
for symbol in message:
lowerCamelCase__ = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__snake_case )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__snake_case ):
lowerCamelCase__ = 0
else:
translated.append(__snake_case )
return "".join(__snake_case )
if __name__ == "__main__":
main()
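# Round-trip sketch of this Vigenère implementation (verified by hand against the key schedule,
# which only advances on alphabetic characters):
#
#   >>> encrypt_message("LION", "Attack at dawn")
#   'Lbhnns og oika'
#   >>> decrypt_message("LION", "Lbhnns og oika")
#   'Attack at dawn'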
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCAmelCase__() -> str:
'''simple docstring'''
lowerCamelCase__ = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__snake_case )
lowerCamelCase__ = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__snake_case )
env_command_parser(subparsers=__snake_case )
launch_command_parser(subparsers=__snake_case )
tpu_command_parser(subparsers=__snake_case )
test_command_parser(subparsers=__snake_case )
# Let's go
lowerCamelCase__ = parser.parse_args()
if not hasattr(__snake_case ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__snake_case )
if __name__ == "__main__":
main()
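# Typical invocations of this entry point from a shell, mirroring the subcommands registered
# above (`train.py` is a placeholder script name):
#
#   accelerate config                        # interactive configuration
#   accelerate env                           # print environment info
#   accelerate launch train.py --some-arg    # launch a training script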
"""simple docstring"""
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(_SCREAMING_SNAKE_CASE ) == 0:
return []
UpperCamelCase , UpperCamelCase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
UpperCamelCase = int(max_value - min_value ) + 1
UpperCamelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in my_list:
buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE )
return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
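# Complexity sketch: with k = int(max - min) + 1 buckets this runs in O(n + k) plus the cost
# of sorting each (small) bucket. Another quick check:
#
#   >>> bucket_sort([10, 7, 12])
#   [7, 10, 12]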
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio into a text prompt with Whisper
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)

        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
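# Loading sketch for a community pipeline like this one (illustrative; the checkpoint,
# custom_pipeline name, and the speech model/processor arguments are placeholders):
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",
#       custom_pipeline="speech_to_image_diffusion",
#       speech_model=whisper_model,
#       speech_processor=whisper_processor,
#   )
#   output = pipe(audio=waveform, sampling_rate=16_000)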
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
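# For context, `calculate_bleu` (imported from the examples' utils) is, to the best of my
# knowledge, a thin sacrebleu wrapper along these lines (a sketch, not the verified source):
#
#   from sacrebleu import corpus_bleu
#   def calculate_bleu(output_lns, refs_lns):
#       return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}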
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end time and duration (in minutes) from a single GitHub Actions job payload."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Fetch all jobs of a workflow run and map job name -> timing info."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. `s3://`) from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether `fs` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename `src` to `dst`, using an efficient local move when possible."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's async loop/thread references so HTTP filesystems don't hang after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
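# Quick behavioral sketch of `extract_path_from_uri`:
#
#   >>> extract_path_from_uri("s3://my-bucket/data")
#   'my-bucket/data'
#   >>> extract_path_from_uri("relative/path")
#   'relative/path'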
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from"
                " scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
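
# Hedged usage sketch (not part of the original script): a minimal, self-contained
# illustration of the absolute-learning-rate scaling applied above, assuming the
# MAE convention absolute_lr = base_lr * total_batch_size / 256. The parameter
# values below are invented for the example.
def _example_absolute_lr(
    base_lr: float = 1e-3, per_device_batch_size: int = 32, gradient_accumulation_steps: int = 2, world_size: int = 4
) -> float:
    total_train_batch_size = per_device_batch_size * gradient_accumulation_steps * world_size
    return base_lr * total_train_batch_size / 256


assert _example_absolute_lr() == 1e-3  # 32 * 2 * 4 == 256, so the base rate is unchanged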
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
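
# Hedged example invocation (the flags come from the parser above; the file
# paths are placeholders, not taken from the original document):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin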
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check is_small_dataset against the configured in-memory size cap."""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
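
# Hedged reference sketch of the semantics exercised above (this helper is
# illustrative and written from the test's expectations, not copied from the
# real datasets.utils.info_utils implementation): a dataset counts as "small"
# only when both sizes are truthy and the dataset is strictly below the cap.
def _reference_is_small_dataset(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False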
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
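
# Hedged usage examples (the --config_file flag is defined above; the path is
# a placeholder):
#   accelerate config                                  # interactive prompts, default save location
#   accelerate config --config_file ./my_config.yaml   # save to an explicit path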
def is_automorphic_number(number: int) -> bool:
    """Return True if `number` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
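
# Hedged worked examples (not in the original file): a number is automorphic
# when its square ends in the number itself.
#   is_automorphic_number(76) -> True   (76 ** 2 == 5776, which ends in 76)
#   is_automorphic_number(25) -> True   (25 ** 2 == 625, which ends in 25)
#   is_automorphic_number(7)  -> False  (7 ** 2 == 49, which ends in 9)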
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return a function interpolating the data points y_1, ..., y_n with a degree n - 1 polynomial."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) from the Project Euler 101 statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials fitted to func(1..order)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
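
# Hedged sanity check (added for illustration, not in the original file). It
# uses the cubic example from the Project Euler 101 statement: for u(n) = n**3
# the first incorrect terms are 1, 15 and 58, which sum to 74.
def _cube(n: int) -> int:
    return n**3


assert solution(_cube, 3) == 74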
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    r"""
    Constructs an OWL-ViT processor wrapping an image processor and a CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
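
# Hedged usage sketch (kept in comments because it downloads weights; the
# checkpoint name is the commonly published "google/owlvit-base-patch32",
# assumed here rather than taken from this file):
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # inputs then carries input_ids, attention_mask and pixel_values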
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    r"""
    Constructs an MGP-STR processor wrapping an image processor and MGP-STR tokenizers into a single processor.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Convert (char, bpe, wordpiece) logits into strings, keeping the best-scoring head per sample."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
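
# Hedged usage sketch (comments only; "alibaba-damo/mgp-str-base" is the usual
# public checkpoint, assumed here rather than taken from this file):
#   from transformers import MgpstrProcessor
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)   # an MgpstrForSceneTextRecognition model
#   generated = processor.batch_decode(outputs.logits)["generated_text"]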
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the digit string n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
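
# Hedged note (added for reference): with the full 1000-digit string above and
# the fixed 13-digit window, the published Project Euler 8 answer is
# 23514624000, so the print above should show that value.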
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
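
# Hedged example of running this suite from an accelerate checkout (the
# command is illustrative; the actual test-file path may differ):
#   python -m pytest -sv tests/test_examples.py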
from manim import *
class Stage1(Scene):
    # NOTE: class and variable names are reconstructed for readability; the
    # direction constants (UP/DOWN/LEFT/RIGHT) are best-effort assumptions.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
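
# Hedged render command (the class name follows the reconstruction above and
# the file name is a placeholder; flags are standard manim community-edition
# options for preview at low quality):
#   manim -pql stage_1.py Stage1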
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder
def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def lowerCamelCase__ ( self : Dict , UpperCamelCase : int , UpperCamelCase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MaMaaaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval()
__UpperCAmelCase : List[Any] = inputs_dict["""input_ids"""]
__UpperCAmelCase : Any = inputs_dict["""attention_mask"""]
__UpperCAmelCase : Optional[Any] = inputs_dict["""head_mask"""]
# first forward pass
__UpperCAmelCase : Optional[int] = model(UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase , use_cache=UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase : str = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
        # append the new tokens to input_ids and the new mask to attention_mask
__UpperCAmelCase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase , attention_mask=UpperCamelCase )["""last_hidden_state"""]
__UpperCAmelCase : List[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , past_key_values=UpperCamelCase )[
"""last_hidden_state"""
]
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-2 ) )
def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = MaMaaaModel(config=UpperCamelCase ).to(UpperCamelCase ).eval()
__UpperCAmelCase : Optional[Any] = model(**UpperCamelCase )
__UpperCAmelCase : Any = outputs.encoder_last_hidden_state
__UpperCAmelCase : Dict = outputs.last_hidden_state
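        # round-trip the encoder through save_pretrained/from_pretrained and check
        # that the standalone encoder reproduces the full model's encoder states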
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : str = model.get_encoder()
encoder.save_pretrained(UpperCamelCase )
__UpperCAmelCase : Optional[int] = MaMaaaEncoder.from_pretrained(UpperCamelCase ).to(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Any = model.get_decoder()
decoder.save_pretrained(UpperCamelCase )
__UpperCAmelCase : str = MaMaaaDecoder.from_pretrained(UpperCamelCase ).to(UpperCamelCase )
__UpperCAmelCase : List[str] = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCamelCase__ ( A , A , A , unittest.TestCase ):
"""simple docstring"""
__a = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__a = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__a = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__a = True
__a = True
__a = False
__a = False
def lowerCamelCase__ ( self : Dict , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : List[str] ):
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = MaMaaaModelTester(self )
__UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase : Optional[int] = model_class.from_pretrained(UpperCamelCase , output_loading_info=UpperCamelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCamelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
__UpperCAmelCase : Optional[Any] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : Union[str, Any] = copy.deepcopy(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
if not self.is_encoder_decoder:
__UpperCAmelCase : List[Any] = inputs["""input_ids"""]
del inputs["input_ids"]
else:
__UpperCAmelCase : Optional[Any] = inputs["""input_ids"""]
__UpperCAmelCase : int = inputs.get("""decoder_input_ids""" , UpperCamelCase )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = model.get_input_embeddings()
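        # swap token ids for their embeddings so the forward pass exercises the
        # inputs_embeds code path instead of input_ids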
if not self.is_encoder_decoder:
__UpperCAmelCase : Dict = wte(UpperCamelCase )
else:
__UpperCAmelCase : Optional[Any] = wte(UpperCamelCase )
__UpperCAmelCase : int = wte(UpperCamelCase )
with torch.no_grad():
model(**UpperCamelCase )[0]
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : Any = input_dict["""input_ids"""]
__UpperCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase )
__UpperCAmelCase : Tuple = MaMaaaForConditionalGeneration(UpperCamelCase ).eval().to(UpperCamelCase )
if torch_device == "cuda":
model.half()
model.generate(UpperCamelCase , attention_mask=UpperCamelCase )
model.generate(num_beams=4 , do_sample=UpperCamelCase , early_stopping=UpperCamelCase , num_return_sequences=3 )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
return torch.tensor(_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase )
UpperCAmelCase : str = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCamelCase )
__UpperCAmelCase : Optional[int] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
__UpperCAmelCase : Optional[int] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
__UpperCAmelCase : Any = prepare_mam_aaa_inputs_dict(model.config , UpperCamelCase , UpperCamelCase )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**UpperCamelCase )[0]
__UpperCAmelCase : Any = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , UpperCamelCase )
# change to expected output here
__UpperCAmelCase : Dict = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=UpperCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCamelCase )
# change to intended input
__UpperCAmelCase : List[Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
__UpperCAmelCase : Any = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
__UpperCAmelCase : Optional[Any] = prepare_mam_aaa_inputs_dict(model.config , UpperCamelCase , UpperCamelCase )
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**UpperCamelCase )[0]
__UpperCAmelCase : Dict = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCamelCase )
# change to expected output here
__UpperCAmelCase : Optional[int] = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=UpperCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : str = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
__UpperCAmelCase : Optional[Any] = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
__UpperCAmelCase : Tuple = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : str = model.generate(
input_ids=dct["""input_ids"""].to(UpperCamelCase ) , attention_mask=dct["""attention_mask"""].to(UpperCamelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
__UpperCAmelCase : List[str] = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
__UpperCAmelCase : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCamelCase , skip_special_tokens=UpperCamelCase )
assert generated == expected_en
| 139
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase : Dict = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
UpperCAmelCase : List[Any] = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
UpperCAmelCase : int = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def lowerCamelCase__ ( self , predictions , references , normalized : bool = False , ignore_punct : bool = False , support_zh_ja_chars : bool = False , case_sensitive : bool = False , ):
        '''simple docstring'''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
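        # sacrebleu expects one stream per reference set rather than per prediction,
        # so transpose the per-prediction reference lists before scoring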
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 139
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=4_00 , a__=True , a__=32 , a__=True , ) -> List[Any]:
'''simple docstring'''
__snake_case :List[Any] = parent
__snake_case :Dict = batch_size
__snake_case :Optional[Any] = num_channels
__snake_case :Dict = image_size
__snake_case :Dict = min_resolution
__snake_case :Dict = max_resolution
__snake_case :List[Any] = do_resize
__snake_case :Dict = size_divisor
__snake_case :Union[str, Any] = do_rescale
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class snake_case__ ( lowercase_ , unittest.TestCase):
'''simple docstring'''
lowerCamelCase : Tuple = GLPNImageProcessor if is_vision_available() else None
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case :Dict = GLPNImageProcessingTester(self )
@property
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """do_resize""" ) )
self.assertTrue(hasattr(a__ , """size_divisor""" ) )
self.assertTrue(hasattr(a__ , """resample""" ) )
self.assertTrue(hasattr(a__ , """do_rescale""" ) )
def __lowercase ( self ) -> str:
'''simple docstring'''
pass
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__snake_case :Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__snake_case :int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__snake_case :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 291
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase__ = """pytorch_model.bin"""
lowerCamelCase__ = """pytorch_model.bin.index.json"""
lowerCamelCase__ = """adapter_config.json"""
lowerCamelCase__ = """adapter_model.bin"""
lowerCamelCase__ = """adapter_model.safetensors"""
lowerCamelCase__ = """tf_model.h5"""
lowerCamelCase__ = """tf_model.h5.index.json"""
lowerCamelCase__ = """model.ckpt"""
lowerCamelCase__ = """flax_model.msgpack"""
lowerCamelCase__ = """flax_model.msgpack.index.json"""
lowerCamelCase__ = """model.safetensors"""
lowerCamelCase__ = """model.safetensors.index.json"""
lowerCamelCase__ = """config.json"""
lowerCamelCase__ = """preprocessor_config.json"""
lowerCamelCase__ = FEATURE_EXTRACTOR_NAME
lowerCamelCase__ = """generation_config.json"""
lowerCamelCase__ = """modelcard.json"""
lowerCamelCase__ = """▁"""
lowerCamelCase__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase__ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def UpperCamelCase ( min_version : str ):
    '''simple docstring'''
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = f'''This example requires a minimum version of {min_version},'''
        error_message += f''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
            """versions of HuggingFace Transformers.""" )
| 291
| 1
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=True , a__=False , a__=False , a__=False , a__=2 , a__=99 , a__=0 , a__=32 , a__=5 , a__=4 , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=2 , a__=4 , a__="last" , a__=True , a__=None , a__=0 , ) -> List[Any]:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_lengths
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = gelu_activation
snake_case_ = sinusoidal_embeddings
snake_case_ = causal
snake_case_ = asm
snake_case_ = n_langs
snake_case_ = vocab_size
snake_case_ = n_special
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = summary_type
snake_case_ = use_proj
snake_case_ = scope
snake_case_ = bos_token_id
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_input_lengths:
snake_case_ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , 2 ).float()
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = XLMModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
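        # run the forward pass with lengths and langs, with langs only, and with
        # input ids alone to cover the optional-argument paths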
snake_case_ = model(UpperCAmelCase__ , lengths=UpperCAmelCase__ , langs=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ , langs=UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> Any:
'''simple docstring'''
snake_case_ = XLMWithLMHeadModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> List[Any]:
'''simple docstring'''
snake_case_ = XLMForQuestionAnsweringSimple(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ )
snake_case_ = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = XLMForQuestionAnswering(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ )
snake_case_ = model(
UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , cls_index=UpperCAmelCase__ , is_impossible=UpperCAmelCase__ , p_mask=UpperCAmelCase__ , )
snake_case_ = model(
UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , cls_index=UpperCAmelCase__ , is_impossible=UpperCAmelCase__ , )
((snake_case_ ) , ) = result_with_labels.to_tuple()
snake_case_ = model(UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ )
((snake_case_ ) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> int:
'''simple docstring'''
snake_case_ = XLMForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ )
snake_case_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> str:
'''simple docstring'''
snake_case_ = self.num_labels
snake_case_ = XLMForTokenClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> List[str]:
'''simple docstring'''
snake_case_ = self.num_choices
snake_case_ = XLMForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
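        # tile every input along a new num_choices dimension, as the multiple
        # choice head expects inputs of shape (batch_size, num_choices, seq_length)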
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class _snake_case ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCAmelCase_ : Any = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCAmelCase__ ( self , a__ , a__ , a__=False ) -> Tuple:
'''simple docstring'''
snake_case_ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = XLMModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase__ , emb_dim=37 )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__=False , a__=1 ) -> int:
'''simple docstring'''
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(
[isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase__ ) )
self.assertEqual(len(UpperCAmelCase__ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCAmelCase__ ):
# adds PAD dummy token
snake_case_ = min_length + idx + 1
snake_case_ = min_length + idx + 1
snake_case_ = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase__ ) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ , a__=False , a__=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(
[isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase__ ) , )
self.assertEqual(len(UpperCAmelCase__ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCAmelCase__ ):
# adds PAD dummy token
snake_case_ = min_length + idx + 1
snake_case_ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase__ ) , )
pass
@slow
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = XLMModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(UpperCAmelCase__ )
snake_case_ = torch.tensor([[14, 447]] , dtype=torch.long , device=UpperCAmelCase__ ) # the president
snake_case_ = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
snake_case_ = model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase__ )
| 400
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''openai-gpt'''
UpperCAmelCase__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Dict , ) ->Any:
'''simple docstring'''
A__ = vocab_size
A__ = n_positions
A__ = n_embd
A__ = n_layer
A__ = n_head
A__ = afn
A__ = resid_pdrop
A__ = embd_pdrop
A__ = attn_pdrop
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = summary_type
A__ = summary_use_proj
A__ = summary_activation
A__ = summary_first_dropout
A__ = summary_proj_to_labels
super().__init__(**UpperCAmelCase__)
| 87
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
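# lazy import structure: the heavy submodules declared below are only imported
# on first attribute access via _LazyModule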
UpperCAmelCase_ ={
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 )
lowerCAmelCase = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
for example in examples:
lowerCAmelCase = video_classifier(UpperCAmelCase_ )
self.assertEqual(
UpperCAmelCase_ , [
{'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
{'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
] , )
@require_torch
def __snake_case ( self ):
lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowerCAmelCase = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
lowerCAmelCase = pipeline(
'''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 )
lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )
lowerCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def __snake_case ( self ):
pass
| 33
| 1
|
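# Sketch note (assumption): this sums the perimeters of almost-equilateral
# Heronian triangles, whose side lengths follow a Pell-like recurrence, while
# the perimeter stays within max_perimeter.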
def A__ ( max_perimeter : int = 10**9 ) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
return perimeters_sum
if __name__ == "__main__":
    print(F"""{A__() = }""")
| 184
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowerCamelCase : Tuple = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 184
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
A__ : Any = logging.get_logger(__name__)
A__ : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : List[Any] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A__ : Optional[int] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A__ : Optional[Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A__ : str = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
A__ : List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
A__ : Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
A__ : int = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A__ : str = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A__ : int = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ : List[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A__ : int = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A__ : int = r'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(A__ )
class _UpperCAmelCase :
"""simple docstring"""
def __call__( self : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Union[bool, str] = False, lowerCamelCase : Union[bool, str] = False, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Optional[bool] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
elif titles is None or texts is None:
lowercase__ = titles if texts is None else texts
return super().__call__(
lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = titles if not isinstance(lowerCamelCase, lowerCamelCase ) else [titles]
lowercase__ = texts if not isinstance(lowerCamelCase, lowerCamelCase ) else [texts]
lowercase__ = len(lowerCamelCase )
lowercase__ = questions if not isinstance(lowerCamelCase, lowerCamelCase ) else [questions] * n_passages
if len(lowerCamelCase ) != len(lowerCamelCase ):
raise ValueError(
F"""There should be as many titles than texts but got {len(lowerCamelCase )} titles and {len(lowerCamelCase )} texts.""" )
lowercase__ = super().__call__(lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase )['''input_ids''']
lowercase__ = super().__call__(lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase )['''input_ids''']
lowercase__ = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase, lowerCamelCase )
]
}
if return_attention_mask is not False:
lowercase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ = attention_mask
return self.pad(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase )
def lowercase__ ( self : Tuple, lowerCamelCase : BatchEncoding, lowerCamelCase : DPRReaderOutput, lowerCamelCase : int = 16, lowerCamelCase : int = 64, lowerCamelCase : int = 4, ):
'''simple docstring'''
lowercase__ = reader_input['''input_ids''']
lowercase__ , lowercase__ , lowercase__ = reader_output[:3]
lowercase__ = len(lowerCamelCase )
lowercase__ = sorted(range(lowerCamelCase ), reverse=lowerCamelCase, key=relevance_logits.__getitem__ )
lowercase__ = []
for doc_id in sorted_docs:
lowercase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ = sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ = sequence_ids.index(self.pad_token_id )
else:
lowercase__ = len(lowerCamelCase )
lowercase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCamelCase, top_spans=lowerCamelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCamelCase, start_index=lowerCamelCase, end_index=lowerCamelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[int], lowerCamelCase : List[int], lowerCamelCase : int, lowerCamelCase : int, ):
'''simple docstring'''
lowercase__ = []
for start_index, start_score in enumerate(lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowercase__ = sorted(lowerCamelCase, key=lambda x : x[1], reverse=lowerCamelCase )
lowercase__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
lowercase__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A__ )
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowercase__ = ["""input_ids""", """attention_mask"""]
| 710
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
            # if the audio is too short, fall back to the start of the audio for this chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
            # if the audio is too short, fall back to the start of the audio for this chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
                lowercase__ = max_length // self.hop_length + 1  # the +1 accounts for how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
            # "repeat" is an additional padding mode: the audio is tiled before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
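# NOTE (added example): a minimal usage sketch of the feature extractor above. Its
# `input_features` / `is_longer` outputs match the structure of transformers' CLAP feature
# extractor, so the public name below is an assumption based on that structure.
import numpy as np
from transformers import ClapFeatureExtractor  # assumed public name for the class above

extractor = ClapFeatureExtractor()  # default config: 48 kHz sampling rate, 10 s max length
wave = np.sin(2 * np.pi * 440 * np.arange(48_000) / 48_000).astype(np.float32)  # 1 s, 440 Hz
features = extractor(wave, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape, features["is_longer"])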
| 671
| 0
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
class snake_case_ ( __UpperCamelCase ):
"""simple docstring"""
snake_case__ = """AutoTokenizer"""
snake_case__ = ["""tokenizer"""]
snake_case__ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__(self: List[str] , __UpperCAmelCase: str , __UpperCAmelCase: List[Any]=None ) -> Tuple:
'''simple docstring'''
super().__init__(__UpperCAmelCase )
__a : Optional[int] = speaker_embeddings
@classmethod
def UpperCAmelCase__ (cls: Optional[Any] , __UpperCAmelCase: Optional[int] , __UpperCAmelCase: int="speaker_embeddings_path.json" , **__UpperCAmelCase: int ) -> str:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
__a : Optional[Any] = get_file_from_repo(
__UpperCAmelCase , __UpperCAmelCase , subfolder=kwargs.pop("subfolder" , __UpperCAmelCase ) , cache_dir=kwargs.pop("cache_dir" , __UpperCAmelCase ) , force_download=kwargs.pop("force_download" , __UpperCAmelCase ) , proxies=kwargs.pop("proxies" , __UpperCAmelCase ) , resume_download=kwargs.pop("resume_download" , __UpperCAmelCase ) , local_files_only=kwargs.pop("local_files_only" , __UpperCAmelCase ) , use_auth_token=kwargs.pop("use_auth_token" , __UpperCAmelCase ) , revision=kwargs.pop("revision" , __UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    f'`{os.path.join(__UpperCAmelCase , __UpperCAmelCase )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
__a : Union[str, Any] = None
else:
with open(__UpperCAmelCase ) as speaker_embeddings_json:
__a : Optional[Any] = json.load(__UpperCAmelCase )
else:
__a : Any = None
__a : List[str] = AutoTokenizer.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
return cls(tokenizer=__UpperCAmelCase , speaker_embeddings=__UpperCAmelCase )
def UpperCAmelCase__ (self: int , __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: List[Any]="speaker_embeddings_path.json" , __UpperCAmelCase: str="speaker_embeddings" , __UpperCAmelCase: bool = False , **__UpperCAmelCase: Optional[int] , ) -> List[str]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__UpperCAmelCase , __UpperCAmelCase , "v2" ) , exist_ok=__UpperCAmelCase )
__a : Dict = {}
__a : Dict = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__a : List[Any] = self._load_voice_preset(__UpperCAmelCase )
__a : Union[str, Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , __UpperCAmelCase , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=__UpperCAmelCase , )
__a : Dict = os.path.join(__UpperCAmelCase , f'{prompt_key}_{key}.npy' )
__a : int = tmp_dict
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , "w" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
super().save_pretrained(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase__ (self: List[Any] , __UpperCAmelCase: str = None , **__UpperCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
__a : Union[str, Any] = self.speaker_embeddings[voice_preset]
__a : Optional[int] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
__a : int = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , __UpperCAmelCase ) , cache_dir=kwargs.pop("cache_dir" , __UpperCAmelCase ) , force_download=kwargs.pop("force_download" , __UpperCAmelCase ) , proxies=kwargs.pop("proxies" , __UpperCAmelCase ) , resume_download=kwargs.pop("resume_download" , __UpperCAmelCase ) , local_files_only=kwargs.pop("local_files_only" , __UpperCAmelCase ) , use_auth_token=kwargs.pop("use_auth_token" , __UpperCAmelCase ) , revision=kwargs.pop("revision" , __UpperCAmelCase ) , )
if path is None:
raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
__a : int = np.load(__UpperCAmelCase )
return voice_preset_dict
def UpperCAmelCase__ (self: int , __UpperCAmelCase: Optional[dict] = None ) -> List[str]:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self: List[str] , __UpperCAmelCase: Dict=None , __UpperCAmelCase: Dict=None , __UpperCAmelCase: Optional[int]="pt" , __UpperCAmelCase: Tuple=256 , __UpperCAmelCase: str=False , __UpperCAmelCase: Tuple=True , __UpperCAmelCase: List[str]=False , **__UpperCAmelCase: int , ) -> Tuple:
'''simple docstring'''
if voice_preset is not None and not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
if (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__a : Optional[int] = self._load_voice_preset(__UpperCAmelCase )
else:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not voice_preset.endswith(".npz" ):
__a : Union[str, Any] = voice_preset + ".npz"
__a : Optional[int] = np.load(__UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(__UpperCAmelCase , **__UpperCAmelCase )
__a : Any = BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
__a : str = self.tokenizer(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
if voice_preset is not None:
__a : Optional[int] = voice_preset
return encoded_text
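# NOTE (added example): a minimal usage sketch, assuming the processor above corresponds to
# transformers' BarkProcessor (suggested by the `semantic_prompt` / `coarse_prompt` /
# `fine_prompt` keys). The checkpoint and voice preset names are illustrative.
from transformers import BarkProcessor  # assumed public name

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs["input_ids"].shape)  # padded to max_length=256 by default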
| 351
|
def a_ (__A ) -> Dict:
"""simple docstring"""
if not head:
return True
    # split the list into two parts
__a , __a : Any = head.next, head
while fast and fast.next:
__a : Optional[int] = fast.next.next
__a : Optional[int] = slow.next
__a : Optional[int] = slow.next
__a : Tuple = None # Don't forget here! But forget still works!
# reverse the second part
__a : Any = None
while second:
__a : int = second.next
__a : int = node
__a : Union[str, Any] = second
__a : Union[str, Any] = nxt
# compare two parts
    # the second part has the same number of nodes or one fewer
while node:
if node.val != head.val:
return False
__a : int = node.next
__a : Dict = head.next
return True
def a_ (__A ) -> Union[str, Any]:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
__a : Union[str, Any] = head
while fast and fast.next:
__a , __a : List[str] = fast.next.next, slow.next
# 2. Push the second half into the stack
__a : Dict = [slow.val]
while slow.next:
__a : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
__a : List[str] = cur.next
return True
def a_ (__A ) -> List[str]:
"""simple docstring"""
if not head or not head.next:
return True
__a : Optional[int] = {}
__a : Optional[int] = 0
while head:
if head.val in d:
d[head.val].append(__A )
else:
__a : Dict = [pos]
__a : Dict = head.next
pos += 1
__a : Dict = pos - 1
__a : Optional[int] = 0
for v in d.values():
if len(__A ) % 2 != 0:
middle += 1
else:
__a : Dict = 0
for i in range(0 , len(__A ) ):
if v[i] + v[len(__A ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
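# NOTE (added example): all three checks above assume a singly linked list node with `val`
# and `next` attributes, which is never defined in this file. A minimal sketch, with a
# hypothetical helper to build a list from Python values:
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt

def build_linked_list(values):
    head = None
    for value in reversed(values):
        head = ListNode(value, head)
    return head

# build_linked_list([1, 2, 2, 1]) is a palindrome; build_linked_list([1, 2, 3]) is not.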
| 351
| 1
|
"""simple docstring"""
import os
from distutils.util import strtobool
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
for e in env_keys:
lowercase__ : Optional[Any] = int(os.environ.get(__lowerCamelCase , -1 ) )
if val >= 0:
return val
return default
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=False ) -> str:
lowercase__ : Optional[int] = os.environ.get(__lowerCamelCase , str(__lowerCamelCase ) )
return strtobool(__lowerCamelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase="no" ) -> List[Any]:
lowercase__ : Dict = os.environ.get(__lowerCamelCase , str(__lowerCamelCase ) )
return value
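# NOTE (added example): a minimal sketch of how these helpers are driven purely by
# environment variables; the variable name below is hypothetical.
import os
from distutils.util import strtobool

os.environ["MY_DEBUG_FLAG"] = "yes"
print(strtobool(os.environ.get("MY_DEBUG_FLAG", "no")) == 1)  # True -- strtobool returns 1/0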
| 122
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : torch.FloatTensor
lowerCAmelCase : torch.FloatTensor
lowerCAmelCase : Optional[torch.FloatTensor] = None
class __A ( A_ ,A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = 2
@register_to_config
def __init__( self : int ,_snake_case : float = 0.02 ,_snake_case : float = 100 ,_snake_case : float = 1.007 ,_snake_case : float = 80 ,_snake_case : float = 0.05 ,_snake_case : float = 50 ,) -> List[Any]:
"""simple docstring"""
lowercase__ : Dict = sigma_max
# setable values
lowercase__ : int = None
lowercase__ : np.IntTensor = None
lowercase__ : torch.FloatTensor = None # sigma(t_i)
def UpperCAmelCase ( self : Dict ,_snake_case : torch.FloatTensor ,_snake_case : Optional[int] = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def UpperCAmelCase ( self : str ,_snake_case : int ,_snake_case : Union[str, torch.device] = None ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = num_inference_steps
lowercase__ : Optional[Any] = np.arange(0 ,self.num_inference_steps )[::-1].copy()
lowercase__ : Optional[Any] = torch.from_numpy(_snake_case ).to(_snake_case )
lowercase__ : Tuple = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : Union[str, Any] = torch.tensor(_snake_case ,dtype=torch.floataa ,device=_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.FloatTensor ,_snake_case : float ,_snake_case : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any] = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 )
else:
lowercase__ : Any = 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : List[Any] = self.config.s_noise * randn_tensor(sample.shape ,generator=_snake_case ).to(sample.device )
lowercase__ : Optional[int] = sigma + gamma * sigma
lowercase__ : Optional[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : torch.FloatTensor ,_snake_case : float ,_snake_case : float ,_snake_case : torch.FloatTensor ,_snake_case : bool = True ,) -> Union[KarrasVeOutput, Tuple]:
"""simple docstring"""
lowercase__ : List[str] = sample_hat + sigma_hat * model_output
lowercase__ : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_snake_case ,derivative=_snake_case ,pred_original_sample=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : torch.FloatTensor ,_snake_case : float ,_snake_case : float ,_snake_case : torch.FloatTensor ,_snake_case : torch.FloatTensor ,_snake_case : torch.FloatTensor ,_snake_case : bool = True ,) -> Union[KarrasVeOutput, Tuple]:
"""simple docstring"""
lowercase__ : str = sample_prev + sigma_prev * model_output
lowercase__ : str = (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_snake_case ,derivative=_snake_case ,pred_original_sample=_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : List[str] ,_snake_case : Dict ,_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
raise NotImplementedError()
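# NOTE (added example): the stochastic "churn" step above in isolation (Karras et al. 2022,
# Algorithm 2): the noise level is temporarily raised from sigma to sigma_hat and matching
# noise is added to the sample. Plain torch; gamma and s_noise values are illustrative.
import torch

sample = torch.randn(1, 3, 8, 8)
sigma, gamma, s_noise = 10.0, 0.05, 1.007
eps = s_noise * torch.randn_like(sample)             # eps ~ N(0, s_noise^2 * I)
sigma_hat = sigma + gamma * sigma
sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5) * eps
print(sigma_hat, sample_hat.shape)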
| 122
| 1
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_lowerCamelCase = """docs/source/en/_toctree.yml"""
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = defaultdict(_SCREAMING_SNAKE_CASE )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase_ : Optional[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase_ : List[str] = []
for duplicate_key in duplicates:
UpperCAmelCase_ : List[str] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(_SCREAMING_SNAKE_CASE ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : s["title"].lower() )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple=False ) -> List[Any]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
UpperCAmelCase_ : int = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase_ : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase_ : Tuple = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase_ : List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase_ : List[str] = api_doc[model_idx]["sections"]
UpperCAmelCase_ : List[str] = [(idx, section) for idx, section in enumerate(_SCREAMING_SNAKE_CASE ) if "sections" in section]
UpperCAmelCase_ : Optional[Any] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase_ : Dict = modality_doc["sections"]
UpperCAmelCase_ : Optional[int] = clean_model_doc_toc(_SCREAMING_SNAKE_CASE )
if old_modality_doc != new_modality_doc:
UpperCAmelCase_ : Union[str, Any] = True
if overwrite:
UpperCAmelCase_ : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase_ : List[str] = model_doc
UpperCAmelCase_ : List[str] = api_doc
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(_SCREAMING_SNAKE_CASE , allow_unicode=_SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowerCamelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
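# NOTE (added example): what `clean_model_doc_toc` does on a tiny input -- entries sharing a
# `local` key and title are merged, then everything is sorted by lowercased title. (A sketch;
# the definition above is obfuscated, so the behavior is inferred from its body.)
toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},   # duplicate with the same title -> merged
    {"local": "model_doc/albert", "title": "ALBERT"},
]
print(clean_model_doc_toc(toc))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]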
| 71
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowerCAmelCase__( lowercase : str ) -> List[str]:
__snake_case : Any = int(lowercase )
__snake_case , __snake_case , __snake_case : List[str] = t // 3600, (t // 60) % 60, t % 60
return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def lowerCAmelCase__( lowercase : Tuple , lowercase : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Dict=300 ) -> int:
# docstyle-ignore
return f"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]:
__snake_case : Any = "<table border=\"1\" class=\"dataframe\">\n"
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case : List[str] = f"""{elt:.6f}""" if isinstance(lowercase , lowercase ) else str(lowercase )
html_code += f""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ : List[str] =5
UpperCAmelCase_ : Optional[int] =0.2
def __init__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = total
__snake_case : List[Any] = "" if prefix is None else prefix
__snake_case : Any = leave
__snake_case : Optional[Any] = parent
__snake_case : Any = width
__snake_case : List[Any] = None
__snake_case : str = None
__snake_case : Union[str, Any] = None
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None ) -> Optional[Any]:
'''simple docstring'''
__snake_case : int = value
if comment is not None:
__snake_case : str = comment
if self.last_value is None:
__snake_case : Optional[Any] = time.time()
__snake_case : Union[str, Any] = value
__snake_case : Optional[int] = None
__snake_case : Optional[int] = self.warmup
__snake_case : Optional[int] = 1
self.update_bar(UpperCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case : List[str] = time.time()
__snake_case : Optional[Any] = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case : List[str] = self.elapsed_time / (value - self.start_value)
else:
__snake_case : str = None
if value >= self.total:
__snake_case : Union[str, Any] = self.total
__snake_case : Dict = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case : Tuple = self.average_time_per_item * (self.total - value)
self.update_bar(UpperCAmelCase )
__snake_case : str = value
__snake_case : Union[str, Any] = current_time
if self.average_time_per_item is None:
__snake_case : int = 1
else:
__snake_case : Any = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> List[str]:
'''simple docstring'''
__snake_case : List[str] = " " * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase )
if self.elapsed_time is None:
__snake_case : List[str] = F"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
__snake_case : int = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
__snake_case : List[str] = (
F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
F""" {format_time(self.predicted_remaining )}"""
)
self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
self.display()
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Optional[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case : List[Any] = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("" ) )
class _lowerCamelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase )
__snake_case : Optional[Any] = None if column_names is None else [column_names]
__snake_case : Union[str, Any] = None
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Optional[int] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case : Tuple = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
if self.inner_table is None:
__snake_case : str = [list(values.keys() ), list(values.values() )]
else:
__snake_case : List[Any] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(UpperCAmelCase )
__snake_case : Any = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=300 ) -> str:
'''simple docstring'''
__snake_case : Union[str, Any] = NotebookProgressBar(UpperCAmelCase , prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase )
return self.child_bar
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[str] = None
self.display()
class _lowerCamelCase ( a ):
"""simple docstring"""
def __init__( self ) -> str:
'''simple docstring'''
__snake_case : List[str] = None
__snake_case : List[Any] = None
__snake_case : Dict = False
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__snake_case : str = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
__snake_case : Optional[Any] = 0
__snake_case : Tuple = 0
__snake_case : str = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss" )
__snake_case : int = NotebookTrainingTracker(state.max_steps , UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
'''simple docstring'''
__snake_case : List[str] = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
__snake_case : Optional[Any] = False
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> Any:
'''simple docstring'''
if not has_length(UpperCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case : List[str] = self.training_tracker.add_child(len(UpperCAmelCase ) )
else:
__snake_case : int = NotebookProgressBar(len(UpperCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case : Optional[Any] = None
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case : int = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case : Optional[Any] = state.global_step
self.training_tracker.write_line(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> int:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case : Optional[Any] = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case : Optional[int] = log["loss"]
break
if self.first_column == "Epoch":
__snake_case : Union[str, Any] = int(state.epoch )
else:
__snake_case : List[Any] = state.global_step
__snake_case : Tuple = "eval"
for k in metrics:
if k.endswith("_loss" ):
__snake_case : Any = re.sub(r"\_loss$" , "" , UpperCAmelCase )
__snake_case : Dict = metrics.pop("total_flos" , UpperCAmelCase )
__snake_case : Optional[Any] = metrics.pop("epoch" , UpperCAmelCase )
__snake_case : int = metrics.pop(F"""{metric_key_prefix}_runtime""" , UpperCAmelCase )
__snake_case : List[str] = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , UpperCAmelCase )
__snake_case : Union[str, Any] = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , UpperCAmelCase )
__snake_case : Union[str, Any] = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , UpperCAmelCase )
for k, v in metrics.items():
if k == F"""{metric_key_prefix}_loss""":
__snake_case : Tuple = v
else:
__snake_case : str = k.split("_" )
__snake_case : int = " ".join([part.capitalize() for part in splits[1:]] )
__snake_case : Optional[int] = v
self.training_tracker.write_line(UpperCAmelCase )
self.training_tracker.remove_child()
__snake_case : Optional[Any] = None
# Evaluation takes a long time so we should force the next update.
__snake_case : Any = True
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=UpperCAmelCase )
__snake_case : Optional[Any] = None
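# NOTE (added example): the two standalone helpers above, exercised directly. The names are
# the ones referenced by the callbacks (the defs themselves are obfuscated): `format_time`
# renders seconds as h:mm:ss (or mm:ss under an hour); `text_to_html_table` renders the
# metrics rows shown under the notebook progress bar.
print(format_time(75))     # '01:15'
print(format_time(3_725))  # '1:02:05'
print(text_to_html_table([["Step", "Loss"], [10, 0.123456]])[:40])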
| 243
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCamelCase : Dict = logging.getLogger(__name__)
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''sequence-classification'''
def __init__( self : Optional[Any] , A_ : int ) -> Tuple:
"""simple docstring"""
if type(A_ ) == dict:
lowerCamelCase_ = Namespace(**A_ )
lowerCamelCase_ = glue_output_modes[hparams.task]
lowerCamelCase_ = glue_tasks_num_labels[hparams.task]
super().__init__(A_ , A_ , self.mode )
def a__ ( self : List[Any] , **A_ : str ) -> List[Any]:
"""simple docstring"""
return self.model(**A_ )
def a__ ( self : int , A_ : Any , A_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase_ = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
lowerCamelCase_ = self(**A_ )
lowerCamelCase_ = outputs[0]
lowerCamelCase_ = self.trainer.lr_schedulers[0]['scheduler']
lowerCamelCase_ = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.hparams
lowerCamelCase_ = processors[args.task]()
lowerCamelCase_ = processor.get_labels()
for mode in ["train", "dev"]:
lowerCamelCase_ = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , A_ )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
lowerCamelCase_ = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
lowerCamelCase_ = convert_examples_to_features(
A_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , A_ )
torch.save(A_ , A_ )
def a__ ( self : Dict , A_ : str , A_ : int , A_ : bool = False ) -> DataLoader:
"""simple docstring"""
lowerCamelCase_ = 'dev' if mode == 'test' else mode
lowerCamelCase_ = self._feature_file(A_ )
logger.info('Loading features from cached file %s' , A_ )
lowerCamelCase_ = torch.load(A_ )
lowerCamelCase_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCamelCase_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase_ = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase_ = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ , shuffle=A_ , )
def a__ ( self : Tuple , A_ : Any , A_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase_ = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
lowerCamelCase_ = self(**A_ )
lowerCamelCase_ , lowerCamelCase_ = outputs[:2]
lowerCamelCase_ = logits.detach().cpu().numpy()
lowerCamelCase_ = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a__ ( self : List[Any] , A_ : Dict ) -> tuple:
"""simple docstring"""
lowerCamelCase_ = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
lowerCamelCase_ = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase_ = np.argmax(A_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase_ = np.squeeze(A_ )
lowerCamelCase_ = np.concatenate([x['target'] for x in outputs] , axis=0 )
lowerCamelCase_ = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase_ = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase_ = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , A_ , A_ )}
lowerCamelCase_ = dict(results.items() )
lowerCamelCase_ = results
return ret, preds_list, out_label_list
def a__ ( self : Union[str, Any] , A_ : list ) -> dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._eval_end(A_ )
lowerCamelCase_ = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a__ ( self : List[str] , A_ : Union[str, Any] ) -> dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._eval_end(A_ )
lowerCamelCase_ = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a__ ( A_ : Optional[Any] , A_ : List[Any] ) -> Tuple:
"""simple docstring"""
BaseTransformer.add_model_specific_args(A_ , A_ )
parser.add_argument(
'--max_seq_length' , default=128 , type=A_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=A_ , required=A_ , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=A_ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = argparse.ArgumentParser()
add_generic_args(lowercase , os.getcwd() )
lowerCamelCase_ = GLUETransformer.add_model_specific_args(lowercase , os.getcwd() )
lowerCamelCase_ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCamelCase_ = os.path.join(
'./results' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
os.makedirs(args.output_dir )
lowerCamelCase_ = GLUETransformer(lowercase )
lowerCamelCase_ = generic_train(lowercase , lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCamelCase_ = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=lowercase ) )
lowerCamelCase_ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowercase )
if __name__ == "__main__":
main()
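# NOTE (added example): the aggregation performed in `_eval_end` above, in isolation --
# per-batch losses are averaged and per-batch logits concatenated before GLUE metrics are
# computed. The tiny inputs below are illustrative.
import numpy as np
import torch

outputs = [
    {"val_loss": torch.tensor(0.4), "pred": np.array([[0.1, 0.9], [0.8, 0.2]])},
    {"val_loss": torch.tensor(0.6), "pred": np.array([[0.3, 0.7]])},
]
val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().item()      # 0.5
preds = np.argmax(np.concatenate([x["pred"] for x in outputs], axis=0), axis=1)  # [1 0 1]
print(val_loss_mean, preds)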
| 651
|
import cva
import numpy as np
class A:
'''simple docstring'''
def __init__( self : int , A_ : float , A_ : int ) -> List[Any]:
"""simple docstring"""
if k in (0.04, 0.06):
lowerCamelCase_ = k
lowerCamelCase_ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : str ) -> str:
"""simple docstring"""
return str(self.k )
def a__ ( self : Any , A_ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowerCamelCase_ = cva.imread(A_ , 0 )
lowerCamelCase_ , lowerCamelCase_ = img.shape
lowerCamelCase_ = []
lowerCamelCase_ = img.copy()
lowerCamelCase_ = cva.cvtColor(A_ , cva.COLOR_GRAY2RGB )
lowerCamelCase_ , lowerCamelCase_ = np.gradient(A_ )
lowerCamelCase_ = dx**2
lowerCamelCase_ = dy**2
lowerCamelCase_ = dx * dy
        lowerCamelCase_ = self.k  # use the configured Harris free parameter instead of a hardcoded 0.04
lowerCamelCase_ = self.window_size // 2
for y in range(A_ , h - offset ):
for x in range(A_ , w - offset ):
lowerCamelCase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ = (wxx * wyy) - (wxy**2)
lowerCamelCase_ = wxx + wyy
lowerCamelCase_ = det - k * (trace**2)
                # corner-response threshold; tune as needed
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase : Optional[int] = HarrisCorner(0.04, 3)
lowerCamelCase , lowerCamelCase : Optional[int] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 651
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class _a ( SCREAMING_SNAKE_CASE__):
__magic_name__ = """xmod"""
def __init__( self : List[Any] , _lowercase : int=30522 , _lowercase : str=768 , _lowercase : int=12 , _lowercase : Any=12 , _lowercase : Optional[Any]=3072 , _lowercase : Tuple="gelu" , _lowercase : int=0.1 , _lowercase : Tuple=0.1 , _lowercase : Tuple=512 , _lowercase : Dict=2 , _lowercase : Dict=0.02 , _lowercase : int=1E-12 , _lowercase : int=1 , _lowercase : List[str]=0 , _lowercase : Dict=2 , _lowercase : Any="absolute" , _lowercase : int=True , _lowercase : List[str]=None , _lowercase : List[Any]=False , _lowercase : Union[str, Any]=2 , _lowercase : Union[str, Any]=False , _lowercase : str=True , _lowercase : int=True , _lowercase : str=("en_XX",) , _lowercase : int=None , **_lowercase : Optional[int] , ) -> Optional[Any]:
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
snake_case : str = vocab_size
snake_case : Union[str, Any] = hidden_size
snake_case : int = num_hidden_layers
snake_case : Dict = num_attention_heads
snake_case : Any = hidden_act
snake_case : str = intermediate_size
snake_case : Optional[Any] = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = max_position_embeddings
snake_case : Tuple = type_vocab_size
snake_case : List[str] = initializer_range
snake_case : str = layer_norm_eps
snake_case : Optional[int] = position_embedding_type
snake_case : Tuple = use_cache
snake_case : Dict = classifier_dropout
snake_case : Tuple = pre_norm
snake_case : Optional[int] = adapter_reduction_factor
snake_case : Optional[int] = adapter_layer_norm
snake_case : Union[str, Any] = adapter_reuse_layer_norm
snake_case : List[str] = ln_before_adapter
snake_case : List[Any] = list(_lowercase )
snake_case : str = default_language
class _a ( SCREAMING_SNAKE_CASE__):
@property
def __lowercase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
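# NOTE (added example): a minimal usage sketch, assuming the config above is transformers'
# XmodConfig (suggested by model_type "xmod" and the checkpoint map). Language codes are
# illustrative.
from transformers import XmodConfig  # assumed public name

config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
print(config.model_type, config.languages)  # xmod ['en_XX', 'de_DE']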
| 449
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
A = '\nHuman: <<task>>\n\nAssistant: '
A = 'huggingface-tools/default-prompts'
A = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[int] , lowerCamelCase_: str , lowerCamelCase_: Tuple="run" ):
"""simple docstring"""
if prompt_or_repo_id is None:
snake_case : Any = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , lowerCamelCase_ ) is not None:
return prompt_or_repo_id
snake_case : Optional[int] = cached_file(
lowerCamelCase_ , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(lowerCamelCase_ , "r" , encoding="utf-8" ) as f:
return f.read()
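# NOTE (added example): how the chat template above is typically filled in -- the <<task>>
# placeholder is replaced with the user request before the prompt is sent to the model.
template = "\nHuman: <<task>>\n\nAssistant: "
prompt = template.replace("<<task>>", "Translate 'hello' to French.")
print(prompt)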
| 449
| 1
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            # Colored output is only supported outside of Windows
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index, highlighting if it is the current position"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
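if __name__ == "__main__":
    # Manual smoke test, not part of the module proper; it requires a real
    # terminal (arrow keys do not work with piped input) and the choices below
    # are illustrative only.
    menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow"])
    print(f"Selected index: {menu.run(default_choice=0)}")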
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
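# These tests spawn a fresh interpreter because TRANSFORMERS_OFFLINE is read at
# import time and cannot be flipped from inside a running pytest process. To
# run them directly (path follows the usual transformers checkout layout):
#   pytest tests/utils/test_offline.py -x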
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel, fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
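# Quick numeric sanity check for slerp (pure numpy path; illustrative, not part
# of the pipeline): the endpoints must be recovered at t=0 and t=1.
#   v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#   assert np.allclose(slerp(0.0, v0, v1), v0)
#   assert np.allclose(slerp(1.0, v0, v1), v1)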
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t**0.5 * noise_pred) / alpha_prod_t**0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
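# Hedged usage sketch (component loading is abbreviated; the optional CoCa
# captioner comes from open_clip and can be omitted when explicit prompts are
# given):
#   pipe = CLIPGuidedImagesMixingStableDiffusion(
#       vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor
#   )
#   result = pipe(style_image, content_image, style_prompt="oil painting",
#                 content_prompt="a photo of a dog", clip_guidance_scale=100)
#   result.images[0].save("mixed.png")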
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("""Good Bye...""")
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
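# This script is normally driven by the accelerate test suite (`accelerate
# test`); a direct multi-process invocation looks like (sketch, process count
# is illustrative):
#   accelerate launch --num_processes 2 test_sync.py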
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to feed the processor with."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
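# End-to-end sketch of the processor under test (the checkpoint id is an
# assumption; any CLIPSeg checkpoint shipping a processor config works):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")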
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
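# The _LazyModule swap above defers the heavy torch-dependent imports until an
# attribute is actually touched, e.g.:
#   from transformers.models.lilt import LiltModel  # real import happens here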
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
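# Illustrative mapping produced by rename_key on a BLIP state-dict key:
#   rename_key("visual_encoder.blocks.0.attn.qkv.weight")
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"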
@torch.no_grad()
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ):
"""simple docstring"""
if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    # Note: the call below referenced `args.checkpoint_path` without declaring the flag;
    # it is declared here (unused by the conversion itself, which downloads fixed URLs)
    # so that the call succeeds.
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the original BLIP checkpoint.")
    args = parser.parse_args()

    convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
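# Hypothetical invocation (the script file name is assumed from the function it calls;
# the original BLIP weights are fetched from the hard-coded URLs above, so only the
# output path matters):
#
#     python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-hf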
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
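# Sketch of the intended quantization workflow for this trainer (the construction
# arguments are hypothetical; `quant_trainer` is the companion module imported above):
#
#     trainer = QuestionAnsweringTrainer(model=model, args=training_args,
#                                        quant_trainer_args=quant_trainer_args, ...)
#     trainer.calibrate()            # run a few batches to collect quantizer ranges
#     metrics = trainer.evaluate()   # validate the calibrated, fake-quantized model
#     trainer.save_onnx("./")        # export through pytorch_quantization's fake-quant ops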
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
__UpperCamelCase : Optional[Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] )
lowerCAmelCase = g.get_repo('huggingface/transformers' )
lowerCAmelCase = repo.get_issues(state='open' )
for issue in open_issues:
lowerCAmelCase = sorted([comment for comment in issue.get_comments()] , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase )
lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
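# To run this locally (hypothetical example; requires a token with repo scope;
# the script path is assumed):
#
#     GITHUB_TOKEN=<token> python scripts/stale.py
#
# All date arithmetic above compares UTC timestamps via `dt.utcnow()`.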
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if the two integers have opposite signs.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
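# Why the XOR trick works: in two's complement the most significant bit is the
# sign bit, so `a ^ b` is negative exactly when the sign bits of `a` and `b`
# differ. For example, (-4) ^ 3 == -1 < 0 (opposite signs), while 4 ^ 3 == 7 >= 0.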
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, just skip keys that carry it
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
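# The merge performed above, in isolation (a minimal sketch with made-up shapes;
# it mirrors the `--alpha` help text's rule W = W0 + alpha * deltaW):
#
#     W0 = torch.randn(16, 16)                            # frozen base weight
#     up, down = torch.randn(16, 4), torch.randn(4, 16)   # LoRA low-rank factors
#     W = W0 + 0.75 * torch.mm(up, down)                  # merged weight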
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)


TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
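# Example invocation (hypothetical values):
#
#     python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#         --checkpoint_name bert-base-uncased --dump_path ./fast-tokenizers
#
# Only the generated `tokenizer.json` files are kept; every other saved file is removed.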
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
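# Illustration of the fused-QKV split performed above (standalone sketch; sizes
# are made up, `dim` plays the role of `all_head_size`):
#
#     fused = torch.randn(3 * 4, 8)     # rows stacked as [q | k | v]
#     dim = 4
#     q, k, v = fused[:dim], fused[dim : dim * 2], fused[-dim:]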
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
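# For reference, the reader/writer pair exercised above round-trips like this
# (sketch; the file path is a placeholder):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#     reloaded = JsonDatasetReader("out.jsonl").read()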
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
    line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
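# Typical usage (sketch; task, paths and checkpoint are placeholders):
#
#     from transformers import AutoTokenizer
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")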
| 613
| 0
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Any, SCREAMING_SNAKE_CASE__: List[str] ) -> Any:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Optional[Any], SCREAMING_SNAKE_CASE__: Union[str, Any], SCREAMING_SNAKE_CASE__: Dict ) -> List[str]:
"""simple docstring"""
__a = tmp_path / 'cache'
__a = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = JsonDatasetReader(SCREAMING_SNAKE_CASE__, cache_dir=SCREAMING_SNAKE_CASE__, keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: str, SCREAMING_SNAKE_CASE__: List[Any], SCREAMING_SNAKE_CASE__: List[str] ) -> Dict:
"""simple docstring"""
__a = tmp_path / 'cache'
__a = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = JsonDatasetReader(SCREAMING_SNAKE_CASE__, features=SCREAMING_SNAKE_CASE__, cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Tuple, SCREAMING_SNAKE_CASE__: Union[str, Any], SCREAMING_SNAKE_CASE__: int ) -> Optional[Any]:
"""simple docstring"""
__a = tmp_path / 'cache'
__a = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = JsonDatasetReader(SCREAMING_SNAKE_CASE__, features=SCREAMING_SNAKE_CASE__, cache_dir=SCREAMING_SNAKE_CASE__ ).read()
assert isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Union[str, Any], SCREAMING_SNAKE_CASE__: Dict ) -> int:
"""simple docstring"""
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__a = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
__a = features.copy()
__a = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = tmp_path / 'cache'
__a = JsonDatasetReader(SCREAMING_SNAKE_CASE__, features=SCREAMING_SNAKE_CASE__, cache_dir=SCREAMING_SNAKE_CASE__ ).read()
assert isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Optional[int], SCREAMING_SNAKE_CASE__: List[Any], SCREAMING_SNAKE_CASE__: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__a = tmp_path / 'cache'
__a = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__a = JsonDatasetReader(SCREAMING_SNAKE_CASE__, cache_dir=SCREAMING_SNAKE_CASE__, split=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Optional[int], SCREAMING_SNAKE_CASE__: Any, SCREAMING_SNAKE_CASE__: Tuple ) -> List[str]:
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
__a = jsonl_path
elif issubclass(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
__a = [jsonl_path]
__a = tmp_path / 'cache'
__a = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__a = JsonDatasetReader(SCREAMING_SNAKE_CASE__, cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Any, SCREAMING_SNAKE_CASE__: Any, SCREAMING_SNAKE_CASE__: Optional[int]=("train",) ) -> List[str]:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
for split in splits:
__a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Union[str, Any], SCREAMING_SNAKE_CASE__: Optional[Any], SCREAMING_SNAKE_CASE__: str ) -> Any:
"""simple docstring"""
__a = tmp_path / 'cache'
__a = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = JsonDatasetReader({'train': jsonl_path}, cache_dir=SCREAMING_SNAKE_CASE__, keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Dict, SCREAMING_SNAKE_CASE__: Optional[Any], SCREAMING_SNAKE_CASE__: Union[str, Any] ) -> Any:
"""simple docstring"""
__a = tmp_path / 'cache'
__a = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = JsonDatasetReader({'train': jsonl_path}, features=SCREAMING_SNAKE_CASE__, cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Any, SCREAMING_SNAKE_CASE__: Union[str, Any], SCREAMING_SNAKE_CASE__: str ) -> Union[str, Any]:
"""simple docstring"""
if split:
__a = {split: jsonl_path}
else:
__a = 'train'
__a = {'train': jsonl_path, 'test': jsonl_path}
__a = tmp_path / 'cache'
__a = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__a = JsonDatasetReader(SCREAMING_SNAKE_CASE__, cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[str] ) -> Tuple:
"""simple docstring"""
return json.load(SCREAMING_SNAKE_CASE__ )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int ) -> Any:
"""simple docstring"""
return [json.loads(SCREAMING_SNAKE_CASE__ ) for line in buffer]
class __SCREAMING_SNAKE_CASE :
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Tuple:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , lines=lowerCamelCase ).write()
buffer.seek(0 )
__a = load_json_function(lowerCamelCase )
assert isinstance(lowerCamelCase , lowerCamelCase )
assert isinstance(exported_content[0] , lowerCamelCase )
assert len(lowerCamelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Tuple:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , lines=lowerCamelCase , orient=lowerCamelCase ).write()
buffer.seek(0 )
__a = load_json(lowerCamelCase )
assert isinstance(lowerCamelCase , lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , lines=lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__a = load_json_function(lowerCamelCase )
assert isinstance(lowerCamelCase , lowerCamelCase )
assert isinstance(exported_content[0] , lowerCamelCase )
assert len(lowerCamelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->List[Any]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , lines=lowerCamelCase , orient=lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__a = load_json(lowerCamelCase )
assert isinstance(lowerCamelCase , lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase ) == 10
def __UpperCamelCase ( self , lowerCamelCase ) ->str:
'''simple docstring'''
with pytest.raises(lowerCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , num_proc=0 )
    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
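    # Shape reference (illustrative values, not from the test fixtures): with two rows,
    # orient="records" serializes to [{"col": 1}, {"col": 2}], orient="split" to
    # {"columns": [...], "data": [[...], [...]]}, and orient="columns" to
    # {"col": {"0": 1, "1": 2}} -- the shapes the parametrized cases above assert on.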
| 448
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here is the functional helper imported from image_transforms
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
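# Minimal usage sketch (hypothetical input; assumes the restored class name above
# and `import numpy as np`):
#   processor = GLPNImageProcessor(do_resize=True, size_divisor=32)
#   batch = processor.preprocess(images=[np.zeros((517, 384, 3), dtype=np.uint8)], return_tensors="np")
#   batch["pixel_values"][0].shape  # -> (3, 512, 384): 517 is floored to 512, a multiple of 32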
| 448
| 1
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Converts a list of InputExamples into padded InputFeatures."""
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing ([])" when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
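    # Worked layout example (hypothetical input): for words ["Hugging", "Face"] with
    # labels ["B-ORG", "I-ORG"], where the tokenizer splits "Hugging" into
    # ["Hu", "##gging"], the method above produces
    #   tokens:    [CLS]  Hu     ##gging  Face   [SEP]
    #   label_ids: [pad,  B-ORG, pad,     I-ORG, pad]
    # with pad == pad_token_label_id, so subword continuations and special tokens
    # are ignored by the loss.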
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task,
            data_dir,
            tokenizer,
            labels,
            model_type,
            max_seq_length=None,
            overwrite_cache=False,
            mode=Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use -100 as padding label id so that only real label ids
        # contribute to the loss later.

        def __init__(
            self,
            token_classification_task,
            data_dir,
            tokenizer,
            labels,
            model_type,
            max_seq_length=None,
            overwrite_cache=False,
            mode=Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
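# Usage sketch (hypothetical task, paths and tokenizer; assumes the restored names above):
#   dataset = TokenClassificationDataset(
#       token_classification_task=task, data_dir="./data", tokenizer=tokenizer,
#       labels=labels, model_type="bert", max_seq_length=128,
#   )
#   dataset[0].input_ids  # indexed like any torch Dataset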
| 700
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
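# The round-trip exercised in the first test class reduces to (hypothetical local sketch):
#   config = GenerationConfig(do_sample=True, temperature=0.7)
#   config.save_pretrained(tmp_dir)
#   loaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
# Kwargs passed to from_pretrained override the serialized values, which is why
# loaded.temperature is asserted to be 1.0 while do_sample stays True.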
| 302
| 0
|
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
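# Illustrative trace (hypothetical values): find_max([3, 7, 1, 9, 5], 0, 4) splits at
# mid = (0 + 4) >> 1 == 2, recurses into [0, 2] (max 7) and [3, 4] (max 9),
# and returns the larger of the two halves, i.e. 9.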
| 463
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Optional[int] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> int:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: List[Any] =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Tuple =tmp_path / "cache"
lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Tuple =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: Tuple =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: List[Any] =features.copy() if features else default_expected_features
lowerCamelCase__: int =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Optional[Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
if split:
lowerCamelCase__: Any ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: str =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> int:
"""simple docstring"""
lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: List[str] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: Optional[Any] =Features({"image": Image()} )
lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
| 59
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def __a ( self : int ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def __a ( self : str ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def __a ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __a ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return 8
@property
def __a ( self : List[str] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase = CLIPVisionModel(__lowerCamelCase )
return model
@property
def __a ( self : str ) -> Tuple:
'''simple docstring'''
lowercase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __a ( self : List[str] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase = PriorTransformer(**__lowerCamelCase )
return model
@property
def __a ( self : str ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase = ShapERenderer(**__lowerCamelCase )
return model
def __a ( self : Any ) -> int:
'''simple docstring'''
lowercase = self.dummy_prior
lowercase = self.dummy_image_encoder
lowercase = self.dummy_image_processor
lowercase = self.dummy_renderer
lowercase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , )
lowercase = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __a ( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any=0 ) -> int:
'''simple docstring'''
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith('''mps''' ):
lowercase = torch.manual_seed(__lowerCamelCase )
else:
lowercase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowercase = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __a ( self : Any ) -> Any:
'''simple docstring'''
lowercase = '''cpu'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCamelCase )
lowercase = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
lowercase = output.images[0]
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self : int ) -> Tuple:
'''simple docstring'''
lowercase = torch_device == '''cpu'''
lowercase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , )
def __a ( self : Optional[int] ) -> str:
'''simple docstring'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCamelCase )
lowercase = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowercase = 1
lowercase = 2
lowercase = self.get_dummy_inputs(__lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase = batch_size * [inputs[key]]
lowercase = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
def __a ( self : List[str] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ) -> int:
'''simple docstring'''
lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowercase = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
lowercase = pipe(
__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 479
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0.0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0.0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0.0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
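# Minimal usage sketch (hypothetical values) for the class above:
#   p = Polynomial(2, [1, 0, 3])   # represents 3x^2 + 1 (coefficients[i] multiplies x^i)
#   q = Polynomial(1, [0, 2])      # represents 2x
#   (p * q).evaluate(2)            # (3*4 + 1) * (2*2) = 52
#   p.derivative()                 # 6x
#   p.integral(constant=5)         # x^3 + x + 5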
| 479
| 1
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
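# Shape sketch (hypothetical sizes): for a 512x512 image and k_size=3, the im2col
# matrix is (510 * 510, 9); the dot product with the flattened (9,) kernel followed
# by reshape yields the (510, 510) filtered image.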
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 401
|
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c for a Pythagorean triplet with a + b + c = n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F'''{solution() = }''')
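# Derivation sketch for the closed form above: from a + b + c = n and
# a^2 + b^2 = c^2, substitute c = n - a - b to get a^2 + b^2 = (n - a - b)^2,
# which simplifies to b * (2n - 2a) = n^2 - 2an, i.e. b = (n^2 - 2an) / (2n - 2a).
# For n = 1000 the triplet found is (200, 375, 425) with product 31_875_000.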
| 401
| 1
|
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 593
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 593
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 316
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 316
| 1
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 701
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
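# Note on "ratio_char_token": it stores characters-per-token for each file; e.g.
# (hypothetical) a 1_200-character source file tokenized into 400 ids has ratio 3.0,
# a cheap proxy for how efficiently the tokenizer compresses the text.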
| 677
| 0
|
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """A wrapper around a learning rate scheduler that only steps when the optimizer(s) took a real step."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
|
'''simple docstring'''
from manim import *
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def a__ ( self ) -> List[str]:
lowercase : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowercase : str = Rectangle(height=0.25 , width=0.25 )
lowercase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase : List[str] = [mem.copy() for i in range(6 )]
lowercase : Any = [mem.copy() for i in range(6 )]
lowercase : List[str] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
lowercase : Union[str, Any] = Text("CPU" , font_size=2_4 )
lowercase : List[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a_ )
lowercase : List[Any] = [mem.copy() for i in range(4 )]
lowercase : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Dict = Text("GPU" , font_size=2_4 )
lowercase : Tuple = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
gpu.move_to([-1, -1, 0] )
self.add(a_ )
lowercase : Tuple = [mem.copy() for i in range(6 )]
lowercase : Optional[int] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Any = Text("Model" , font_size=2_4 )
lowercase : str = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
model.move_to([3, -1.0, 0] )
self.add(a_ )
lowercase : Dict = []
lowercase : Tuple = []
lowercase : List[Any] = []
for i, rect in enumerate(a_ ):
rect.set_stroke(a_ )
lowercase : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a_ , buff=0.0 )
self.add(a_ )
model_cpu_arr.append(a_ )
self.add(*a_ , *a_ , *a_ )
lowercase : Any = [mem.copy() for i in range(6 )]
lowercase : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[str] = Text("Loaded Checkpoint" , font_size=2_4 )
lowercase : Optional[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a_ )
lowercase : Any = []
lowercase : int = []
for i, rect in enumerate(a_ ):
lowercase : str = fill.copy().set_fill(a_ , opacity=0.7 )
target.move_to(a_ )
ckpt_arr.append(a_ )
lowercase : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a_ )
self.add(*a_ , *a_ )
lowercase : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase : str = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a_ , a_ )
lowercase : Any = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a_ )
lowercase : List[Any] = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowercase : Any = [meta_mem.copy() for i in range(6 )]
lowercase : Dict = [meta_mem.copy() for i in range(6 )]
lowercase : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Any = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
lowercase : Optional[Any] = Text("Disk" , font_size=2_4 )
lowercase : List[str] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(a_ , run_time=3 ) , Write(a_ , run_time=1 ) , Create(a_ , run_time=1 ) )
lowercase : Optional[Any] = []
for i, rect in enumerate(a_ ):
lowercase : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a_ , run_time=1.5 ) )
self.play(*a_ )
self.play(FadeOut(a_ ) )
lowercase : List[Any] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) )
self.play(
FadeOut(a_ , a_ , *a_ , *a_ ) , )
self.wait()
| 372
| 0
|
def find_min(arr: list[int]) -> int:
    """Return the minimum possible difference between the sums of a two-set partition of arr."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        # sum 0 is always reachable (empty subset), including for i == 0
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # fixed recurrence: carry reachability from the previous item
            # (dp[i - 1][j]), not from the previous sum (dp[i][j - 1])
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
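# Worked example (hypothetical values): for arr = [1, 6, 11, 5], s = 23; the largest
# j <= s // 2 with dp[n][j] True is j = 11 (e.g. the subset {6, 5} or {11}),
# giving a minimum partition difference of 23 - 2 * 11 = 1.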
| 713
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__UpperCamelCase ):
__magic_name__ = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
__magic_name__ = FlaxAutoModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__UpperCamelCase ):
__magic_name__ = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
__magic_name__ = FlaxAutoModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
__magic_name__ = AutoTokenizer.from_pretrained(__UpperCamelCase )
__magic_name__ = FlaxBertModel.from_pretrained(__UpperCamelCase )
__magic_name__ = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__UpperCamelCase: Optional[Any] ):
return model(**__UpperCamelCase )
eval(**__UpperCamelCase ).block_until_ready()
@slow
def _SCREAMING_SNAKE_CASE ( self: int ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
__magic_name__ = AutoTokenizer.from_pretrained(__UpperCamelCase )
__magic_name__ = FlaxRobertaModel.from_pretrained(__UpperCamelCase )
__magic_name__ = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__UpperCamelCase: Any ):
return model(**__UpperCamelCase )
eval(**__UpperCamelCase ).block_until_ready()
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
__magic_name__ = FlaxAutoModel.from_pretrained('bert-base' )
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__magic_name__ = FlaxAutoModel.from_pretrained(__UpperCamelCase , revision='aaaaaa' )
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
__magic_name__ = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def _SCREAMING_SNAKE_CASE ( self: int ):
'''simple docstring'''
with self.assertRaisesRegex(__UpperCamelCase , 'Use `from_pt=True` to load this model' ):
__magic_name__ = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
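# Added usage sketch (not part of the original test file): the API the tests
# above exercise, outside of unittest. Requires network access to the Hub.
def _flax_auto_demo() -> None:
    from transformers import AutoTokenizer, FlaxAutoModel

    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    outputs = model(**tokenizer("Do you support jax jitted function?", return_tensors="jax"))
    print(outputs.last_hidden_state.shape)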
| 184
| 0
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class A ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase : Dict = PLBartTokenizer
lowerCamelCase : Any = None
lowerCamelCase : List[str] = False
def UpperCAmelCase__ ( self : List[Any]):
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase: Tuple = PLBartTokenizer(_UpperCamelCase , language_codes="base" , keep_accents=_UpperCamelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase__ ( self : Optional[int]):
_lowercase: int = PLBartTokenizer(_UpperCamelCase , language_codes="base" , keep_accents=_UpperCamelCase)
_lowercase: str = tokenizer.tokenize("This is a test")
self.assertListEqual(_UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowercase: Dict = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_lowercase: List[str] = tokenizer.convert_tokens_to_ids(_UpperCamelCase)
self.assertListEqual(
_UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowercase: str = tokenizer.convert_ids_to_tokens(_UpperCamelCase)
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )
def UpperCAmelCase__ ( self : Any):
_lowercase: Optional[int] = PLBartTokenizer(_UpperCamelCase , language_codes="multi" , keep_accents=_UpperCamelCase)
_lowercase: int = tokenizer.tokenize("This is a test")
self.assertListEqual(_UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowercase: str = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_lowercase: List[str] = tokenizer.convert_tokens_to_ids(_UpperCamelCase)
self.assertListEqual(
_UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowercase: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase)
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456,
        19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463,
        33449, 2471, 2, PYTHON_CODE,
    ]
@classmethod
def UpperCAmelCase__ ( cls : int):
_lowercase: PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX")
_lowercase: Optional[int] = 1
return cls
def UpperCAmelCase__ ( self : Optional[Any]):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50_001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50_002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50_003)
def UpperCAmelCase__ ( self : Dict):
_lowercase: List[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCamelCase)
def UpperCAmelCase__ ( self : int):
self.assertIn(_UpperCamelCase , self.tokenizer.all_special_ids)
_lowercase: List[Any] = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
_lowercase: Optional[Any] = self.tokenizer.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase)
_lowercase: List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCamelCase)
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
self.assertNotIn(self.tokenizer.eos_token , _UpperCamelCase)
def UpperCAmelCase__ ( self : int):
_lowercase: Union[str, Any] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , _UpperCamelCase)
_lowercase: Tuple = 10
_lowercase: str = self.tokenizer(_UpperCamelCase , max_length=_UpperCamelCase , truncation=_UpperCamelCase).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , _UpperCamelCase)
self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase)
def UpperCAmelCase__ ( self : Union[str, Any]):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]) , [50_004, 50_001])
def UpperCAmelCase__ ( self : Union[str, Any]):
_lowercase: int = tempfile.mkdtemp()
_lowercase: Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCamelCase)
_lowercase: Dict = PLBartTokenizer.from_pretrained(_UpperCamelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCamelCase)
@require_torch
def UpperCAmelCase__ ( self : Optional[int]):
_lowercase: int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCamelCase , return_tensors="pt")
_lowercase: Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCamelCase)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def UpperCAmelCase__ ( self : List[str]):
_lowercase: Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
_lowercase: Optional[Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
_lowercase: Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCamelCase)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def UpperCAmelCase__ ( self : int):
_lowercase: Union[str, Any] = self.tokenizer(self.src_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=3 , return_tensors="pt")
_lowercase: Dict = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=10 , return_tensors="pt")
_lowercase: Dict = targets["input_ids"]
_lowercase: List[Any] = shift_tokens_right(_UpperCamelCase , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def UpperCAmelCase__ ( self : Optional[Any]):
_lowercase: Optional[int] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java")
self.assertEqual(
nested_simplify(_UpperCamelCase) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50_003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50_001,
} , )
| 226
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
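def _example_run() -> None:
    # Added usage sketch (not part of the original file): the programmatic
    # equivalent of `accelerate config --config_file my_config.yaml`. It is
    # interactive and will prompt on stdin, exactly like `main()` above.
    parser = config_command_parser()
    config_command(parser.parse_args(["--config_file", "my_config.yaml"]))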
| 226
| 1
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCamelCase : Any = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """BERT-style configuration extended with movement-pruning hyper-parameters
    (pruning_method, mask_init, mask_scale)."""

    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
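if __name__ == "__main__":
    # Added usage sketch (not part of the original file): the defaults mirror
    # bert-base; pruning_method/mask_init/mask_scale drive movement pruning.
    config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
    print(config.model_type, config.hidden_size)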
| 709
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels follows inheritance, so user subclasses are handled too
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 684
| 0
|
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: 1 when both inputs are equal, else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 306
|
def or_gate(input_1: int, input_2: int) -> int:
    """OR: 1 when at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 306
| 1
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( __snake_case : Tuple , __snake_case : str , __snake_case : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[str]="attention" ) -> Tuple:
'''simple docstring'''
snake_case__ :List[str] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
snake_case__ :int = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
snake_case__ :Dict = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
snake_case__ :Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
snake_case__ :Optional[Any] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
snake_case__ :Union[str, Any] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
snake_case__ :List[str] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
snake_case__ :str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowercase_ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : int=False ) -> List[Any]:
'''simple docstring'''
if split_mlp_wi:
snake_case__ :str = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
snake_case__ :str = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
snake_case__ :int = (wi_a, wi_a)
else:
snake_case__ :Optional[int] = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
snake_case__ :Optional[int] = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Any ) -> Optional[int]:
'''simple docstring'''
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def lowercase_ ( __snake_case : dict , *, __snake_case : int , __snake_case : bool , __snake_case : bool = False ) -> Dict:
'''simple docstring'''
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case__ :Optional[Any] = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , __snake_case )
snake_case__ :Union[str, Any] = collections.OrderedDict()
# Shared embeddings.
snake_case__ :List[str] = old["token_embedder/embedding"]
# Encoder.
for i in range(__snake_case ):
# Block i, layer 0 (Self Attention).
snake_case__ :Tuple = tax_layer_norm_lookup(__snake_case , __snake_case , "encoder" , "pre_attention_layer_norm" )
snake_case__ , snake_case__ , snake_case__ , snake_case__ :List[str] = tax_attention_lookup(__snake_case , __snake_case , "encoder" , "attention" )
snake_case__ :str = layer_norm
snake_case__ :Optional[int] = k.T
snake_case__ :Tuple = o.T
snake_case__ :Optional[int] = q.T
snake_case__ :List[str] = v.T
# Block i, layer 1 (MLP).
snake_case__ :Any = tax_layer_norm_lookup(__snake_case , __snake_case , "encoder" , "pre_mlp_layer_norm" )
snake_case__ , snake_case__ :Union[str, Any] = tax_mlp_lookup(__snake_case , __snake_case , "encoder" , __snake_case )
snake_case__ :List[Any] = layer_norm
if split_mlp_wi:
snake_case__ :int = wi[0].T
snake_case__ :Optional[Any] = wi[1].T
else:
snake_case__ :List[str] = wi.T
snake_case__ :Dict = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case__ :str = tax_relpos_bias_lookup(
__snake_case , __snake_case , "encoder" ).T
snake_case__ :int = old["encoder/encoder_norm/scale"]
if not scalable_attention:
snake_case__ :List[Any] = tax_relpos_bias_lookup(
__snake_case , 0 , "encoder" ).T
snake_case__ :Union[str, Any] = tax_relpos_bias_lookup(
__snake_case , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(__snake_case ):
# Block i, layer 0 (Self Attention).
snake_case__ :Dict = tax_layer_norm_lookup(__snake_case , __snake_case , "decoder" , "pre_self_attention_layer_norm" )
snake_case__ , snake_case__ , snake_case__ , snake_case__ :List[Any] = tax_attention_lookup(__snake_case , __snake_case , "decoder" , "self_attention" )
snake_case__ :List[Any] = layer_norm
snake_case__ :Tuple = k.T
snake_case__ :Optional[int] = o.T
snake_case__ :Tuple = q.T
snake_case__ :List[str] = v.T
# Block i, layer 1 (Cross Attention).
snake_case__ :List[Any] = tax_layer_norm_lookup(__snake_case , __snake_case , "decoder" , "pre_cross_attention_layer_norm" )
snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = tax_attention_lookup(__snake_case , __snake_case , "decoder" , "encoder_decoder_attention" )
snake_case__ :List[Any] = layer_norm
snake_case__ :Optional[Any] = k.T
snake_case__ :List[Any] = o.T
snake_case__ :str = q.T
snake_case__ :Optional[Any] = v.T
# Block i, layer 2 (MLP).
snake_case__ :Optional[int] = tax_layer_norm_lookup(__snake_case , __snake_case , "decoder" , "pre_mlp_layer_norm" )
snake_case__ , snake_case__ :Dict = tax_mlp_lookup(__snake_case , __snake_case , "decoder" , __snake_case )
snake_case__ :int = layer_norm
if split_mlp_wi:
snake_case__ :Optional[Any] = wi[0].T
snake_case__ :List[str] = wi[1].T
else:
snake_case__ :Optional[int] = wi.T
snake_case__ :Optional[int] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case__ :Optional[Any] = tax_relpos_bias_lookup(__snake_case , __snake_case , "decoder" ).T
snake_case__ :Dict = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case__ :Optional[Any] = old["decoder/logits_dense/kernel"].T
return new
def lowercase_ ( __snake_case : str , __snake_case : bool ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case__ :Tuple = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case__ :Optional[Any] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
snake_case__ :Optional[int] = state_dict["shared.weight"]
return state_dict
def lowercase_ ( __snake_case : str , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Any ) -> Optional[int]:
'''simple docstring'''
snake_case__ :List[str] = checkpoints.load_tax_checkpoint(__snake_case )
snake_case__ :List[Any] = convert_tax_to_pytorch(
__snake_case , num_layers=config.num_layers , is_encoder_only=__snake_case , scalable_attention=__snake_case )
snake_case__ :Optional[Any] = make_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case , strict=__snake_case )
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : bool = False , __snake_case : bool = False , ) -> List[Any]:
'''simple docstring'''
snake_case__ :Dict = MTaConfig.from_json_file(__snake_case )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case__ :Any = UMTaEncoderModel(__snake_case )
else:
snake_case__ :List[Any] = UMTaForConditionalGeneration(__snake_case )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__snake_case )
# Verify that we can load the checkpoint.
model.from_pretrained(__snake_case )
print("Done" )
if __name__ == "__main__":
__UpperCAmelCase : Dict = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
__UpperCAmelCase : str = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
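# Added usage sketch (not part of the original file; the script name and all
# paths below are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /tmp/t5x/checkpoint_1000000 \
#       --config_file /tmp/t5x/config.json \
#       --pytorch_dump_path /tmp/umt5_pytorch \
#       --scalable_attention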
| 57
|
from __future__ import annotations
def lowercase_ ( __snake_case : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(__snake_case ) / len(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
| 1
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_keeps_dataset_name(split_info: SplitInfo):
    # the "dataset_name" attribute is kept by `asdict`, even though it is deprecated
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 579
|
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a well-formed Roman numeral string into its integer value."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-form Roman numeral for a positive integer."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting every numeral in the
    input file in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        generated = generate_roman_numerals(num)
        savings += len(original) - len(generated)
    return savings
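# Added usage sketch (not part of the original file): the two helpers round-trip,
# and non-minimal numerals compress ("VIIII" -> 9 -> "IX").
assert parse_roman_numerals("MCMXC") == 1990
assert generate_roman_numerals(1990) == "MCMXC"
assert generate_roman_numerals(parse_roman_numerals("VIIII")) == "IX"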
if __name__ == "__main__":
print(F"""{solution() = }""")
| 322
| 0
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError if the page layout changed."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Scrapes public profile information for a given Instagram username."""

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and extract the embedded user JSON."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Smoke test against a public profile; skipped in CI."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ : List[str] = InstagramUser('github')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 272
|
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return every prime up to and including ``num``.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
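# Added usage sketch (not part of the original file): quick self-check.
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]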
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ : List[Any] = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 272
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_models_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class _A ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 359
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] ={
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5,
        position_embedding_type="absolute", block_per_row=4, approx_mode="full",
        initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 359
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
A_ : int = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 716
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
| 696
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL
A = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self : List[str] ,UpperCamelCase : bool = True ,UpperCamelCase : Dict[str, int] = None ,UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC ,UpperCamelCase : bool = True ,UpperCamelCase : Union[int, float] = 1 / 255 ,UpperCamelCase : bool = True ,UpperCamelCase : Optional[Union[float, List[float]]] = None ,UpperCamelCase : Optional[Union[float, List[float]]] = None ,UpperCamelCase : bool = True ,**UpperCamelCase : Optional[int] ,) -> None:
super().__init__(**UpperCamelCase )
_lowercase : Any = size if size is not None else {'height': 384, 'width': 384}
_lowercase : Tuple = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase )
_lowercase : Any = do_resize
_lowercase : Tuple = size
_lowercase : Optional[Any] = resample
_lowercase : Any = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : str = do_normalize
_lowercase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowercase : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
_lowercase : str = do_convert_rgb
def _lowerCamelCase ( self : int ,UpperCamelCase : np.ndarray ,UpperCamelCase : Dict[str, int] ,UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC ,UpperCamelCase : Optional[Union[str, ChannelDimension]] = None ,**UpperCamelCase : Union[str, Any] ,) -> np.ndarray:
_lowercase : List[Any] = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
_lowercase : List[Any] = (size['height'], size['width'])
return resize(UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def _lowerCamelCase ( self : Union[str, Any] ,UpperCamelCase : np.ndarray ,UpperCamelCase : Union[int, float] ,UpperCamelCase : Optional[Union[str, ChannelDimension]] = None ,**UpperCamelCase : Union[str, Any] ,) -> Tuple:
return rescale(UpperCamelCase ,scale=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def _lowerCamelCase ( self : List[str] ,UpperCamelCase : np.ndarray ,UpperCamelCase : Union[float, List[float]] ,UpperCamelCase : Union[float, List[float]] ,UpperCamelCase : Optional[Union[str, ChannelDimension]] = None ,**UpperCamelCase : Union[str, Any] ,) -> np.ndarray:
return normalize(UpperCamelCase ,mean=UpperCamelCase ,std=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def _lowerCamelCase ( self : Union[str, Any] ,UpperCamelCase : ImageInput ,UpperCamelCase : Optional[bool] = None ,UpperCamelCase : Optional[Dict[str, int]] = None ,UpperCamelCase : PILImageResampling = None ,UpperCamelCase : Optional[bool] = None ,UpperCamelCase : Optional[float] = None ,UpperCamelCase : Optional[bool] = None ,UpperCamelCase : Optional[Union[float, List[float]]] = None ,UpperCamelCase : Optional[Union[float, List[float]]] = None ,UpperCamelCase : Optional[Union[str, TensorType]] = None ,UpperCamelCase : bool = None ,UpperCamelCase : ChannelDimension = ChannelDimension.FIRST ,**UpperCamelCase : List[str] ,) -> PIL.Image.Image:
_lowercase : Tuple = do_resize if do_resize is not None else self.do_resize
_lowercase : int = resample if resample is not None else self.resample
_lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : int = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowercase : int = size if size is not None else self.size
_lowercase : Dict = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase )
_lowercase : Optional[int] = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowercase : Optional[int] = [convert_to_rgb(UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
_lowercase : Any = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
_lowercase : Optional[Any] = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_rescale:
_lowercase : Optional[Any] = [self.rescale(image=UpperCamelCase ,scale=UpperCamelCase ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCamelCase ,mean=UpperCamelCase ,std=UpperCamelCase ) for image in images]
_lowercase : Optional[int] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
_lowercase : Dict = BatchFeature(data={'pixel_values': images} ,tensor_type=UpperCamelCase )
return encoded_outputs
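# Usage sketch (a minimal illustration, not part of the snippet above): the class
# appears to be a CLIP-style image processor with scrambled identifiers, so the
# public transformers CLIPImageProcessor is used below as a stand-in; the size
# dict and output shape are assumptions based on that API.
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(do_resize=True, size={"shortest_edge": 224})
batch = processor(images=Image.new("RGB", (320, 240)), return_tensors="pt")
print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])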
| 125
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A = logging.get_logger(__name__) # pylint: disable=invalid-name
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : str ,UpperCamelCase : Any ,UpperCamelCase : int ) -> Dict:
super().__init__()
self.register_modules(unet=UpperCamelCase ,scheduler=UpperCamelCase )
@torch.no_grad()
def __call__( self : Any ,UpperCamelCase : int = 1 ,UpperCamelCase : int = 100 ,UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,UpperCamelCase : Optional[float] = None ,UpperCamelCase : bool = True ,) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
_lowercase : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
_lowercase : List[str] = audio_length_in_s * self.unet.config.sample_rate
_lowercase : Optional[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'''{audio_length_in_s} is too small. Make sure it\'s at least'''
F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
_lowercase : Optional[int] = int(UpperCamelCase )
if sample_size % down_scale_factor != 0:
_lowercase : Dict = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
_lowercase : Optional[Any] = int(UpperCamelCase )
_lowercase : List[Any] = next(iter(self.unet.parameters() ) ).dtype
_lowercase : Optional[Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(UpperCamelCase ,UpperCamelCase ) and len(UpperCamelCase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_lowercase : int = randn_tensor(UpperCamelCase ,generator=UpperCamelCase ,device=self.device ,dtype=UpperCamelCase )
# set step values
self.scheduler.set_timesteps(UpperCamelCase ,device=audio.device )
_lowercase : Optional[Any] = self.scheduler.timesteps.to(UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowercase : str = self.unet(UpperCamelCase ,UpperCamelCase ).sample
# 2. compute previous image: x_t -> t_t-1
_lowercase : Dict = self.scheduler.step(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ).prev_sample
_lowercase : Optional[int] = audio.clamp(-1 ,1 ).float().cpu().numpy()
_lowercase : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=UpperCamelCase )
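# Usage sketch: the pipeline above mirrors diffusers' DanceDiffusionPipeline
# (unconditional audio generation), so the public API is shown instead of the
# scrambled names; the checkpoint id and device are assumptions for illustration.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
audio = output.audios[0]  # numpy array of shape (channels, samples), clamped to [-1, 1]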
| 125
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 183
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=4 , ) -> Dict:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_attention_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_choices
def a_ ( self ) -> int:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_attention_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a_ ( self ) -> List[str]:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def a_ ( self ) -> List[Any]:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = True
UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : int = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a_ ( self ) -> Dict:
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase_ )
UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase_ )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def a_ ( self ) -> Tuple:
UpperCAmelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase_ )
UpperCAmelCase = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
UpperCAmelCase = model(lowercase_ )[0]
UpperCAmelCase = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , lowercase_ )
# compare the actual values for a slice.
UpperCAmelCase = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
@slow
def a_ ( self ) -> int:
UpperCAmelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase_ )
UpperCAmelCase = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
UpperCAmelCase = model(lowercase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
| 183
| 1
|
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
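# Worked example of the mass-action relation implemented above: n * p = n_i**2,
# so with intrinsic concentration n_i = 1.5e10 and electron concentration
# n = 3.0e10 (both per cm^3), the missing hole concentration is
# p = n_i**2 / n = 7.5e9 per cm^3.
n_i, n = 1.5e10, 3.0e10
assert abs(n_i**2 / n - 7.5e9) < 1.0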
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( enum.Enum ):
UpperCamelCase__ = 0
UpperCamelCase__ = 1
@add_end_docstrings(__magic_name__ )
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = '''generated'''
def __init__( self :Any , *__magic_name__ :Tuple , **__magic_name__ :Tuple ):
'''simple docstring'''
super().__init__(*__magic_name__ , **__magic_name__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Any=None , __magic_name__ :Optional[Any]=None , __magic_name__ :Any=None , __magic_name__ :List[str]=None , __magic_name__ :Tuple=None , __magic_name__ :str=None , **__magic_name__ :List[Any] , ):
'''simple docstring'''
a = {}
if truncation is not None:
a = truncation
a = generate_kwargs
a = {}
if return_tensors is not None and return_type is None:
a = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
a = return_type
if clean_up_tokenization_spaces is not None:
a = clean_up_tokenization_spaces
if stop_sequence is not None:
a = self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
if len(__magic_name__ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
a = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
return True
def lowerCamelCase__ ( self :Dict , *__magic_name__ :Optional[int] , __magic_name__ :List[str] ):
'''simple docstring'''
a = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __magic_name__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
a = ([prefix + arg for arg in args[0]],)
a = True
elif isinstance(args[0] , __magic_name__ ):
a = (prefix + args[0],)
a = False
else:
raise ValueError(
F' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`' )
a = self.tokenizer(*__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self :Tuple , *__magic_name__ :Any , **__magic_name__ :str ):
'''simple docstring'''
a = super().__call__(*__magic_name__ , **__magic_name__ )
if (
isinstance(args[0] , __magic_name__ )
and all(isinstance(__magic_name__ , __magic_name__ ) for el in args[0] )
and all(len(__magic_name__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def lowerCamelCase__ ( self :Dict , __magic_name__ :Optional[Any] , __magic_name__ :List[str]=TruncationStrategy.DO_NOT_TRUNCATE , **__magic_name__ :Any ):
'''simple docstring'''
a = self._parse_and_tokenize(__magic_name__ , truncation=__magic_name__ , **__magic_name__ )
return inputs
def lowerCamelCase__ ( self :Any , __magic_name__ :int , **__magic_name__ :int ):
'''simple docstring'''
if self.framework == "pt":
a , a = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
a , a = tf.shape(model_inputs["""input_ids"""] ).numpy()
a = generate_kwargs.get("""min_length""" , self.model.config.min_length )
a = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__magic_name__ , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
a = self.model.generate(**__magic_name__ , **__magic_name__ )
a = output_ids.shape[0]
if self.framework == "pt":
a = output_ids.reshape(__magic_name__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
a = tf.reshape(__magic_name__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Dict , __magic_name__ :Any=ReturnType.TEXT , __magic_name__ :int=False ):
'''simple docstring'''
a = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
a = {F'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
a = {
F'{self.return_name}_text': self.tokenizer.decode(
__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
}
records.append(__magic_name__ )
return records
@add_end_docstrings(__magic_name__ )
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = '''summary'''
def __call__( self :Any , *__magic_name__ :List[str] , **__magic_name__ :Optional[int] ):
'''simple docstring'''
return super().__call__(*__magic_name__ , **__magic_name__ )
def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
if max_length < min_length:
logger.warning(F'Your min_length={min_length} must be smaller than your max_length={max_length}.' )
if input_length < max_length:
logger.warning(
F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
F'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(__magic_name__ )
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = '''translation'''
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def lowerCamelCase__ ( self :str , *__magic_name__ :Union[str, Any] , __magic_name__ :Any=TruncationStrategy.DO_NOT_TRUNCATE , __magic_name__ :Optional[Any]=None , __magic_name__ :List[str]=None ):
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __magic_name__ ):
return self.tokenizer._build_translation_inputs(
*__magic_name__ , return_tensors=self.framework , truncation=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
else:
return super()._parse_and_tokenize(*__magic_name__ , truncation=__magic_name__ )
def lowerCamelCase__ ( self :int , __magic_name__ :List[str]=None , __magic_name__ :Union[str, Any]=None , **__magic_name__ :Optional[int] ):
'''simple docstring'''
a , a , a = super()._sanitize_parameters(**__magic_name__ )
if src_lang is not None:
a = src_lang
if tgt_lang is not None:
a = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
a = kwargs.get("""task""" , self.task )
a = task.split("""_""" )
if task and len(__magic_name__ ) == 4:
# translation, XX, to YY
a = items[1]
a = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self :Optional[Any] , *__magic_name__ :Any , **__magic_name__ :str ):
'''simple docstring'''
return super().__call__(*__magic_name__ , **__magic_name__ )
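# Usage sketch: the classes above appear to be transformers' text2text,
# summarization and translation pipelines, normally reached through the public
# `pipeline` factory; the default model for this task alias may be downloaded.
from transformers import pipeline

translator = pipeline("translation_en_to_fr")
print(translator("How old are you?", max_length=40)[0]["translation_text"])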
| 468
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1024):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
SCREAMING_SNAKE_CASE = list(zip(_UpperCAmelCase , _UpperCAmelCase))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sorted_examples[0]
def is_too_big(_UpperCAmelCase):
return tok(_UpperCAmelCase , return_tensors='pt').input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:]):
SCREAMING_SNAKE_CASE = new_src + ' ' + src
SCREAMING_SNAKE_CASE = new_tgt + ' ' + tgt
if is_too_big(_UpperCAmelCase) or is_too_big(_UpperCAmelCase): # can't fit, finalize example
finished_src.append(_UpperCAmelCase)
finished_tgt.append(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = src, tgt
else: # can fit, keep adding
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_UpperCAmelCase)
finished_tgt.append(_UpperCAmelCase)
return finished_src, finished_tgt
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = Path(_UpperCAmelCase)
save_path.mkdir(exist_ok=_UpperCAmelCase)
for split in ["train"]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
SCREAMING_SNAKE_CASE = [x.rstrip() for x in Path(_UpperCAmelCase).open().readlines()]
SCREAMING_SNAKE_CASE = [x.rstrip() for x in Path(_UpperCAmelCase).open().readlines()]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pack_examples(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
print(F'''packed {split} split from {len(_UpperCAmelCase)} examples -> {len(_UpperCAmelCase)}.''')
Path(save_path / F'''{split}.source''').open('w').write('\n'.join(_UpperCAmelCase))
Path(save_path / F'''{split}.target''').open('w').write('\n'.join(_UpperCAmelCase))
for split in ["val", "test"]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
shutil.copyfile(_UpperCAmelCase , save_path / F'''{split}.source''')
shutil.copyfile(_UpperCAmelCase , save_path / F'''{split}.target''')
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--tok_name' , type=_UpperCAmelCase , help='like facebook/bart-large-cnn,t5-base, etc.')
parser.add_argument('--max_seq_len' , type=_UpperCAmelCase , default=128)
parser.add_argument('--data_dir' , type=_UpperCAmelCase)
parser.add_argument('--save_path' , type=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.tok_name)
return pack_data_dir(_UpperCAmelCase , Path(args.data_dir) , args.max_seq_len , args.save_path)
if __name__ == "__main__":
packer_cli()
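# Usage sketch: the packer is driven from the command line roughly like this
# (the script filename and data paths are assumptions for illustration):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed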
| 444
|
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
a_ : Dict = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
a_ : List[str] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
a_ : Union[str, Any] = 'zero2'
a_ : List[Any] = 'zero3'
a_ : List[str] = [ZEROa, ZEROa]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
SCREAMING_SNAKE_CASE = parameterized.to_safe_name('_'.join(str(_UpperCAmelCase) for x in param.args))
return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
a_ : List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _snake_case ( A__ ):
@parameterized.expand(a , name_func=a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[str]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@require_torch_multi_gpu
@parameterized.expand(a , name_func=a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[Any]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@parameterized.expand(a , name_func=a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> int:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
@require_torch_multi_gpu
@parameterized.expand(a , name_func=a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[Any]:
self.run_and_check(
stage=a , model=a , distributed=a , fpaa=a , )
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = 10 , a = True , a = True , a = True , ) -> Dict:
SCREAMING_SNAKE_CASE = models[model]
SCREAMING_SNAKE_CASE = self.run_trainer(
stage=a , model_name=a , eval_steps=a , num_train_epochs=1 , distributed=a , fpaa=a , )
self.do_checks(a)
return output_dir
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = 10 , a = 1 , a = True , a = True , ) -> List[str]:
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir('./xxx' , after=a)
SCREAMING_SNAKE_CASE = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(a)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
SCREAMING_SNAKE_CASE = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
SCREAMING_SNAKE_CASE = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
SCREAMING_SNAKE_CASE = self.get_launcher(a)
SCREAMING_SNAKE_CASE = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(a , env=self.get_env())
return output_dir
def SCREAMING_SNAKE_CASE__ ( self , a=False) -> Optional[int]:
# 1. explicitly set --num_nodes=1 just in case these tests end up being run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
SCREAMING_SNAKE_CASE = min(2 , get_gpu_count()) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
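# For reference, with stage "zero2" on 2 GPUs the test composes a command of
# roughly this shape (paths are placeholders resolved from the test dirs):
#   deepspeed --num_nodes 1 --num_gpus 2 <examples>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json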
| 444
| 1
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def UpperCamelCase_( snake_case : Union[str, Any] ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = 2
while True:
if is_prime(lowercase_ ):
yield num
num += 1
def UpperCamelCase_( snake_case : Optional[int] = 2_0_0_0_0_0_0 ):
'''simple docstring'''
return sum(takewhile(lambda snake_case : x < n , prime_generator() ) )
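# Readable restatement of the 6k +/- 1 trial division used above (a sketch with
# unscrambled names, equivalent to the helper it mirrors):
import math

def _is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(20) if _is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]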
if __name__ == "__main__":
print(F"{solution() = }")
| 400
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''openai-gpt'''
UpperCAmelCase__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Dict , ) ->Any:
'''simple docstring'''
A__ = vocab_size
A__ = n_positions
A__ = n_embd
A__ = n_layer
A__ = n_head
A__ = afn
A__ = resid_pdrop
A__ = embd_pdrop
A__ = attn_pdrop
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = summary_type
A__ = summary_use_proj
A__ = summary_activation
A__ = summary_first_dropout
A__ = summary_proj_to_labels
super().__init__(**UpperCAmelCase__)
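# Usage sketch: this mirrors transformers' OpenAIGPTConfig, so the public class is
# used below; note the attribute_map above routes num_hidden_layers to n_layer.
from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_layer=6, n_head=6, n_embd=384)  # a small variant
print(config.num_hidden_layers)  # 6, resolved through the attribute_map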
| 87
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
class UpperCamelCase__( lowercase_ ):
__magic_name__ : int = '''encoder-decoder'''
__magic_name__ : Dict = True
def __init__( self : List[Any] , **lowerCAmelCase : Dict )-> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase = kwargs.pop('''encoder''' )
UpperCAmelCase = encoder_config.pop('''model_type''' )
UpperCAmelCase = kwargs.pop('''decoder''' )
UpperCAmelCase = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase = AutoConfig.for_model(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = AutoConfig.for_model(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = True
@classmethod
def a__( cls : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple )-> Tuple:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase = True
UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase )
def a__( self : Optional[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = self.encoder.to_dict()
UpperCAmelCase = self.decoder.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
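# Usage sketch: assuming the class above is transformers' EncoderDecoderConfig,
# the classmethod defined above corresponds to the public
# `from_encoder_decoder_configs`, used like this:
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig(num_hidden_layers=4)
decoder_config = BertConfig(num_hidden_layers=4)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention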
| 719
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(lowerCAmelCase ) # fails here
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(3 )
UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
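# Standalone sketch of the update loop exercised by the tests above; in practice
# the constraint is handed to `model.generate(constraints=[...])`.
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
assert dc.completed  # the branch [1, 2, 4] was fully matched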
| 50
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__ ( snake_case : Sequence[float] , snake_case : int , snake_case : int )-> tuple[int | None, int | None, float]:
'''simple docstring'''
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
UpperCAmelCase__ : List[Any] = (low + high) // 2
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = max_subarray(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = max_subarray(UpperCamelCase__ , mid + 1 , UpperCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = max_cross_sum(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def SCREAMING_SNAKE_CASE__ ( snake_case : Sequence[float] , snake_case : int , snake_case : int , snake_case : int )-> tuple[int, int, float]:
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = float("-inf" ), -1
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = float("-inf" ), -1
UpperCAmelCase__ : Union[str, Any] = 0
for i in range(UpperCamelCase__ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
UpperCAmelCase__ : Optional[int] = summ
UpperCAmelCase__ : List[str] = i
UpperCAmelCase__ : Tuple = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
UpperCAmelCase__ : Optional[Any] = summ
UpperCAmelCase__ : Optional[Any] = i
return max_left, max_right, (left_sum + right_sum)
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> float:
'''simple docstring'''
UpperCAmelCase__ : Dict = [randint(1 , UpperCamelCase__ ) for _ in range(UpperCamelCase__ )]
UpperCAmelCase__ : List[str] = time.time()
max_subarray(UpperCamelCase__ , 0 , input_size - 1 )
UpperCAmelCase__ : int = time.time()
return end - start
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
UpperCAmelCase__ : Dict = [time_max_subarray(UpperCamelCase__ ) for input_size in input_sizes]
print("No of Inputs\t\tTime Taken" )
for input_size, runtime in zip(UpperCamelCase__ , UpperCamelCase__ ):
print(UpperCamelCase__ , "\t\t" , UpperCamelCase__ )
plt.plot(UpperCamelCase__ , UpperCamelCase__ )
plt.xlabel("Number of Inputs" )
plt.ylabel("Time taken in seconds" )
plt.show()
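# Worked check of the divide-and-conquer result on a classic input: the maximum
# subarray of [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1] with sum 6, which
# the recursion returns as (low, high, sum) = (3, 6, 6). A brute-force cross-check:
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
best = max(sum(nums[i:j]) for i in range(len(nums)) for j in range(i + 1, len(nums) + 1))
assert best == 6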
if __name__ == "__main__":
from doctest import testmod
testmod()
| 438
|
from ..utils import DummyObject, requires_backends
class a ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ['note_seq']
def __init__( self : Dict , *lowerCamelCase__ : int , **lowerCamelCase__ : List[str] ) -> str:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def UpperCAmelCase_ ( cls : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def UpperCAmelCase_ ( cls : Tuple , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 332
| 0
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_lowercase : str = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int ):
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(_SCREAMING_SNAKE_CASE ) , version.parse(_SCREAMING_SNAKE_CASE ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def snake_case__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] = None ):
"""simple docstring"""
lowerCamelCase__ : int =f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''' , _SCREAMING_SNAKE_CASE ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =requirement, None, None
else:
lowerCamelCase__ : List[Any] =re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , _SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
lowerCamelCase__ , lowerCamelCase__ : Tuple =match[0]
lowerCamelCase__ : Optional[Any] =want_full.split(''',''' ) # there could be multiple requirements
lowerCamelCase__ : str ={}
for w in want_range:
lowerCamelCase__ : Any =re.findall(R'''^([\s!=<>]{1,2})(.+)''' , _SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =match[0]
lowerCamelCase__ : int =want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
lowerCamelCase__ : str ='''.'''.join([str(_SCREAMING_SNAKE_CASE ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return
# check if any version is installed
try:
lowerCamelCase__ : List[str] =importlib.metadata.version(_SCREAMING_SNAKE_CASE )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] ='''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
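# Usage sketch for the checker above (these mirror how transformers calls its
# own require_version helper; the package specs are illustrative):
#   require_version("numpy")                               # bare, presence-only check
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")   # multiple ranges, split on ","
#   require_version("python>=3.8")                         # special-cased interpreter check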
| 701
|
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if index == number_of_items:
return 0
lowerCamelCase__ : Optional[int] =0
lowerCamelCase__ : Union[str, Any] =0
lowerCamelCase__ : List[str] =knapsack(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 )
if weights[index] <= max_weight:
lowerCamelCase__ : Dict =values[index] + knapsack(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_weight - weights[index] , index + 1 )
return max(__lowerCamelCase , __lowerCamelCase )
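# Worked example for the branch-on-item recursion above: with weights [1, 3, 4],
# values [15, 20, 30] and capacity 4, taking items 0 and 1 (weight 4) yields 35,
# beating item 2 alone (30), so knapsack([1, 3, 4], [15, 20, 30], 3, 4, 0) == 35.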
if __name__ == "__main__":
import doctest
doctest.testmod()
| 625
| 0
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : str ):
__A = ""
__A = ""
__A = []
__A = 0
__A = 2_56
__A = 0
__A = 0
__A = 0
__A = 0
def UpperCamelCase_ ( self : Union[str, Any] ,A : Dict ):
__A = cva.imread(A ,0 )
__A = copy.deepcopy(self.img )
__A , __A , __A = plt.hist(self.img.ravel() ,2_56 ,[0, 2_56] ,label="x" )
__A = np.sum(A )
for i in range(len(A ) ):
__A = x[i] / self.k
self.sk += prk
__A = (self.L - 1) * self.sk
__A = last % 1  # fractional part of `last`, used below to round to the nearest intensity level
__A = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(A )
__A = int(np.ma.count(self.img ) / self.img[1].size )
__A = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
__A = self.img[j][i]
if num != self.last_list[num]:
__A = self.last_list[num]
cva.imwrite("output_data/output.jpg" ,self.img )
def UpperCamelCase_ ( self : Optional[Any] ):
plt.hist(self.img.ravel() ,2_56 ,[0, 2_56] )
def UpperCamelCase_ ( self : Any ):
cva.imshow("Output-Image" ,self.img )
cva.imshow("Input-Image" ,self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Union[str, Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
SCREAMING_SNAKE_CASE :Tuple = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 55
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowercase : Union[str, Any] =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 1_60_00 ):
lowerCamelCase_ : List[str] = int(round(sample_rate * max_length ) )
if len(lowerCAmelCase__ ) <= sample_length:
return wav
lowerCamelCase_ : int = randint(0 ,len(lowerCAmelCase__ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCamelCase_ :
_a : Optional[str] = field(default=snake_case__ , metadata={'help': 'Name of a dataset from the datasets package'} )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'A file containing the training audio paths and labels.'} )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'A file containing the validation audio paths and labels.'} )
_a : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_a : str = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_a : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_a : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
_a : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_a : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_a : float = field(
default=2_0 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCamelCase_ :
_a : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
_a : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Name or path of preprocessor config.'} )
_a : bool = field(
default=snake_case__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
_a : bool = field(
default=snake_case__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
_a : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_a : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
_a : bool = field(
default=snake_case__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __a ( self : Optional[int] ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder` '
'instead. Setting `freeze_feature_encoder==True`.' , lowerCamelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`. '
'Only make use of `--freeze_feature_encoder`.' )
def _SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' ,lowerCAmelCase__ ,lowerCAmelCase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : List[str] = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowerCamelCase_ : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
lowerCamelCase_ : Optional[int] = DatasetDict()
lowerCamelCase_ : Dict = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCamelCase_ : List[str] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--label_column_name` to the correct text column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowerCamelCase_ : Dict = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowerCamelCase_ : Optional[Any] = raw_datasets.cast_column(
data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowerCamelCase_ : Optional[int] = feature_extractor.model_input_names[0]
def train_transforms(lowerCAmelCase__ ):
lowerCamelCase_ : Optional[int] = []
for audio in batch[data_args.audio_column_name]:
lowerCamelCase_ : Union[str, Any] = random_subsample(
audio['array'] ,max_length=data_args.max_length_seconds ,sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowerCAmelCase__ )
lowerCamelCase_ : int = feature_extractor(lowerCAmelCase__ ,sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase_ : Optional[Any] = {model_input_name: inputs.get(lowerCAmelCase__ )}
lowerCamelCase_ : Any = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowerCAmelCase__ ):
lowerCamelCase_ : Dict = [audio['array'] for audio in batch[data_args.audio_column_name]]
lowerCamelCase_ : Optional[Any] = feature_extractor(lowerCAmelCase__ ,sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase_ : Optional[int] = {model_input_name: inputs.get(lowerCAmelCase__ )}
lowerCamelCase_ : Tuple = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCamelCase_ : Optional[int] = raw_datasets['train'].features[data_args.label_column_name].names
lowerCamelCase_ , lowerCamelCase_ : Optional[int] = {}, {}
for i, label in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ : List[Any] = str(lowerCAmelCase__ )
lowerCamelCase_ : Union[str, Any] = label
# Load the accuracy metric from the datasets package
lowerCamelCase_ : Tuple = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ : Tuple = np.argmax(eval_pred.predictions ,axis=1 )
return metric.compute(predictions=lowerCAmelCase__ ,references=eval_pred.label_ids )
lowerCamelCase_ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(lowerCAmelCase__ ) ,labelaid=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,finetuning_task='audio-classification' ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCamelCase_ : Optional[int] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
    if data_args.max_train_samples is not None:
        raw_datasets['train'] = (
            raw_datasets['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        )
    # Set the training transforms
    raw_datasets['train'].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
    if data_args.max_eval_samples is not None:
        raw_datasets['eval'] = (
            raw_datasets['eval'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
        )
    # Set the validation transforms
    raw_datasets['eval'].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=raw_datasets['train'] if training_args.do_train else None,
    eval_dataset=raw_datasets['eval'] if training_args.do_eval else None,
    compute_metrics=compute_metrics,
    tokenizer=feature_extractor,
)
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    trainer.save_model()
    trainer.log_metrics('train', train_result.metrics)
    trainer.save_metrics('train', train_result.metrics)
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    metrics = trainer.evaluate()
    trainer.log_metrics('eval', metrics)
    trainer.save_metrics('eval', metrics)
# Write model card and (optionally) push to hub
kwargs = {
    'finetuned_from': model_args.model_name_or_path,
    'tasks': 'audio-classification',
    'dataset': data_args.dataset_name,
    'tags': ['audio-classification'],
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
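# Example invocation (hypothetical paths and hyper-parameters):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-keyword-spotting \
#       --do_train --do_eval --learning_rate 3e-5 --max_length_seconds 1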
| 364
| 0
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    """Height of a node; an empty subtree has height 0."""
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    """Return the larger of two integers."""
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at `node` clockwise: the left child becomes the new root."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at `node` counter-clockwise: the right child becomes the new root."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation: left-rotate the left child, then right-rotate `node`."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation: right-rotate the right child, then left-rotate `node`."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
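# The four classic AVL imbalance cases and their fixes (illustrative summary):
#   left-left   -> right_rotation(node)
#   left-right  -> lr_rotation(node)   (left_rotation on the left child, then right_rotation)
#   right-right -> left_rotation(node)
#   right-left  -> rl_rotation(node)   (right_rotation on the right child, then left_rotation)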
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    """Insert `data` into the subtree rooted at `node`, rebalancing on the way up."""
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node
def get_right_most(root: MyNode) -> Any:
    """Return the largest value in the subtree rooted at `root`."""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    """Return the smallest value in the subtree rooted at `root`."""
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    """Delete `data` from the subtree rooted at `root`, rebalancing afterwards."""
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """An AVL tree: a self-balancing binary search tree."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level-order traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
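    # A quick balance sanity check (illustrative): with leaf height counted as 1,
    # inserting the seven values 0..6 produces a perfectly balanced tree of height 3.
    check = AVLtree()
    for value in range(7):
        check.insert(value)
    assert check.get_height() == 3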
| 182
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check whether `number` is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions given as numerator/denominator pairs; return the sum in lowest terms."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
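# Worked example: 1/2 + 1/3 + 1/6
#   top    = 1*3*6 + 1*2*6 + 1*2*3 = 18 + 12 + 6 = 36
#   bottom = 2*3*6 = 36,  gcd(36, 36) = 36  ->  add_three(1, 2, 1, 3, 1, 6) == (1, 1)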
def solution(order: int = 35) -> int:
    """Brute force over fractions of bounded order: collect every unique fraction sum
    produced by the four cases below (n = 1, 2, -1, -2) and return numerator + denominator
    of the total."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 182
| 1
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
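# Illustrative behaviour (hypothetical key names):
#   should_ignore("decoder.model.1.lstm.bias", ["decoder.model.1.lstm"])  -> True   (plain substring match)
#   should_ignore("encoder.model.0.conv.conv", ["encoder.*.norm"])        -> False  (prefix matches, suffix does not)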
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
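# Example invocation (hypothetical script and file names):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz-converted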
| 401
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Find the maximum sum of non-adjacent elements of the input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
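# The recurrence maintained in the loop above, in words:
#   max_including(i) = max_excluding(i - 1) + nums[i]      # take nums[i], so nums[i-1] must be excluded
#   max_excluding(i) = max(max_including(i - 1), max_excluding(i - 1))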
if __name__ == "__main__":
import doctest
doctest.testmod()
| 401
| 1
|
'''Utilities for comparing installed package versions against requirements.'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library's installed version (or a given Version) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """Compare the current PyTorch version against a reference version with an operation."""
    return compare_versions(torch_version, operation, version)
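# Example usage (results depend on the installed versions):
#   compare_versions("numpy", ">=", "1.20.0")   # e.g. True on a recent install
#   is_torch_version("<", "2.0.0")              # True only for torch 1.x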
| 602
|
'''Extract and rank per-job run times from a GitHub Actions workflow run.'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
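# Example invocation (hypothetical script name and run id):
#   python extract_job_times.py --workflow_run_id 123456789
# Output: one "<job name>: <duration in minutes>" line per job, longest first.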
| 602
| 1
|