# Dataset schema: code (string, 82-54.1k chars) | code_codestyle (int64, 0-699) | style_context (string, 111-35.6k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1)
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_ratio_str = rule_str.split(":")
        steps = int(value_str)
        lr_ratio = float(lr_ratio_str)
        rules_dict[steps] = lr_ratio
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
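
# Illustrative usage sketch (not part of the module above): wiring `get_scheduler` to a
# toy optimizer. The model, learning rate, and step counts below are made-up values.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler("cosine", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    optimizer.step()
    lr_scheduler.step()  # LambdaLR rescales the base lr by the lambda's current multiplier
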
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` with `number_of_terms` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
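
# These tests rely on pytest fixtures (`text_path`, `tmp_path`) defined elsewhere in the
# suite. A hypothetical conftest.py sketch of `text_path` for reference only; the real
# fixture lives in the datasets repository and may differ in content:
@pytest.fixture
def text_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.txt"
    path.write_text("0\n1\n2\n3\n")  # four lines, matching the num_rows == 4 assertions
    return str(path)
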
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
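
# A minimal usage sketch, assuming the default lowercase English alphabet. The input is
# "the quick brown fox" Caesar-shifted by 10; for text this short the chi-squared
# ranking is usually, but not always, correct.
if __name__ == "__main__":
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("dro aesmu lbygx pyh")
    print(shift, round(chi_squared, 3), decoded)
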
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
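
# Programmatic equivalent of the CLI entry point above (a sketch; the dump folder is a
# placeholder, and the call downloads the original DINO weights from torch hub):
convert_vit_checkpoint("dino_vitb16", "./dino_vitb16", base_model=True)
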
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
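
# Usage sketch: character-level tokenization, assuming a local vocab.json that maps
# single characters to ids (the path below is a placeholder).
tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
ids = tokenizer("hello")["input_ids"]  # one id per character, via _tokenize above
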
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
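
# The rule these tests exercise, in one line: a filename set is compatible when every
# PyTorch `.bin` weight has a `.safetensors` counterpart. A quick sketch (my reading of
# the tests above, not taken from the test file itself):
assert not is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])
assert is_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
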
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
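
# Usage sketch: remapping custom column names onto the canonical QA schema.
template = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
print(template.column_mapping)  # {'q': 'question', 'ctx': 'context', 'ans': 'answers'}
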
import math


def jump_search(arr: list, x: int) -> int:
    """Search sorted `arr` for `x` in O(sqrt(n)) jumps; return its index or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
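
# Non-interactive sanity checks (a sketch; `arr` must already be sorted):
assert jump_search([0, 1, 3, 4, 13, 26], 13) == 4   # found at index 4
assert jump_search([0, 1, 3, 4, 13, 26], 2) == -1   # absent values return -1
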
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
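
# Effect of the lazy module, illustrated (assumes transformers is installed): importing
# the package module is cheap; the tokenizer submodule is only executed when the
# attribute is first accessed.
import transformers.models.nllb as nllb

tokenizer_cls = nllb.NllbTokenizer  # this access triggers the real import of tokenization_nllb
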
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
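
# Programmatic equivalent of the CLI entry point above (all three paths are placeholders):
convert_rembert_tf_checkpoint_to_pytorch(
    "path/to/model.ckpt", "path/to/rembert_config.json", "path/to/pytorch_model.bin"
)
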
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer, unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
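
# Round-trip sketch with the default config above (a single block and no downsampling,
# so the output shape should match the input shape; this is an assumption about the
# default Encoder/Decoder behavior, not taken from the file):
vq = VQModel()
x = torch.randn(1, 3, 32, 32)
reconstruction = vq(x).sample  # encode -> quantize -> decode
assert reconstruction.shape == x.shape
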
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Any = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
__lowerCAmelCase = MaskFormerConfig(backbone_config=lowerCAmelCase_ )
__lowerCAmelCase = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
__lowerCAmelCase = 847
__lowerCAmelCase = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
__lowerCAmelCase = 150
__lowerCAmelCase = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
__lowerCAmelCase = 171
__lowerCAmelCase = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
__lowerCAmelCase = 133
__lowerCAmelCase = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
__lowerCAmelCase = 19
__lowerCAmelCase = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
__lowerCAmelCase = 65
__lowerCAmelCase = 'mapillary-vistas-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
__lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
return config
def a_ ( lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = dct.pop(lowerCAmelCase_ )
__lowerCAmelCase = val
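# Illustration (using a key pair produced above): rename_key pops the tensor stored
# under the old name and re-inserts it under the new one, e.g.
#   rename_key(state_dict, "sem_seg_head.mask_features.weight",
#              "model.pixel_level_module.decoder.mask_projection.weight")
# leaves the tensor data untouched but reachable only under the new key.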
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : int ):
__lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:dim, :]
__lowerCAmelCase = in_proj_bias[: dim]
__lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
__lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
__lowerCAmelCase = in_proj_weight[
-dim :, :
]
__lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
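# Shape sketch (assuming the usual fused 3*dim projection): for dim = 96 the popped
# qkv weight has shape (288, 96). Rows [0:96] become the query projection, rows
# [96:192] the key projection, and rows [192:288] the value projection, matching the
# q/k/v order of the slices above; the fused bias of length 288 splits the same way.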
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : Dict ):
# fmt: off
__lowerCAmelCase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[: hidden_size, :]
__lowerCAmelCase = in_proj_bias[:config.hidden_size]
__lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase = in_proj_weight[-hidden_size :, :]
__lowerCAmelCase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[: hidden_size, :]
__lowerCAmelCase = in_proj_bias[:config.hidden_size]
__lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase = in_proj_weight[-hidden_size :, :]
__lowerCAmelCase = in_proj_bias[-hidden_size :]
# fmt: on
def a_ ( ):
__lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : str, lowerCAmelCase_ : str, lowerCAmelCase_ : bool = False ):
__lowerCAmelCase = get_maskformer_config(lowerCAmelCase_ )
# load original state_dict
with open(lowerCAmelCase_, 'rb' ) as f:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
read_in_swin_q_k_v(lowerCAmelCase_, config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase_, lowerCAmelCase_ )
# update to torch tensors
for key, value in state_dict.items():
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ )
# load 🤗 model
__lowerCAmelCase = MaskFormerForInstanceSegmentation(lowerCAmelCase_ )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase_, param.shape )
missing_keys , unexpected_keys = model.load_state_dict(lowerCAmelCase_, strict=lowerCAmelCase_ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
__lowerCAmelCase = prepare_img()
if "vistas" in model_name:
__lowerCAmelCase = 65
elif "cityscapes" in model_name:
__lowerCAmelCase = 6_5535
else:
__lowerCAmelCase = 255
__lowerCAmelCase = True if 'ade' in model_name else False
__lowerCAmelCase = MaskFormerImageProcessor(ignore_index=lowerCAmelCase_, reduce_labels=lowerCAmelCase_ )
__lowerCAmelCase = image_processor(lowerCAmelCase_, return_tensors='pt' )
__lowerCAmelCase = model(**lowerCAmelCase_ )
print('Logits:', outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowerCAmelCase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3], lowerCAmelCase_, atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pkl file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
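# Example invocation (script filename and local paths are hypothetical):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path /path/to/output \
#       --push_to_hub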
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase__ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
UpperCamelCase__ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
UpperCamelCase__ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def lowerCAmelCase_ ( self : str , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Any="auto" , UpperCamelCase : List[Any]=-1 , UpperCamelCase : int=0.9 , UpperCamelCase : Any=5 , UpperCamelCase : Union[str, Any]=5_00 , UpperCamelCase : Union[str, Any]="gpt2-large" , UpperCamelCase : Dict=-1 , UpperCamelCase : str=10_24 , UpperCamelCase : int=25 , UpperCamelCase : List[str]=5 , UpperCamelCase : Any=True , UpperCamelCase : str=25 , ):
"""simple docstring"""
_lowercase : Optional[int] = compute_mauve(
p_text=lowerCAmelCase_ , q_text=lowerCAmelCase_ , p_features=lowerCAmelCase_ , q_features=lowerCAmelCase_ , p_tokens=lowerCAmelCase_ , q_tokens=lowerCAmelCase_ , num_buckets=lowerCAmelCase_ , pca_max_data=lowerCAmelCase_ , kmeans_explained_var=lowerCAmelCase_ , kmeans_num_redo=lowerCAmelCase_ , kmeans_max_iter=lowerCAmelCase_ , featurize_model_name=lowerCAmelCase_ , device_id=lowerCAmelCase_ , max_text_length=lowerCAmelCase_ , divergence_curve_discretization_size=lowerCAmelCase_ , mauve_scaling_factor=lowerCAmelCase_ , verbose=lowerCAmelCase_ , seed=lowerCAmelCase_ , )
return out
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForCTC,
Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
_snake_case : List[Any] = True
from torch.cuda.amp import autocast
_snake_case : Dict = logging.getLogger(__name__)
def a_ ( lowerCAmelCase_ : str=None, lowerCAmelCase_ : str=None ):
return field(default_factory=lambda: default, metadata=lowerCAmelCase_ )
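# Usage sketch: `list_field` works around dataclasses rejecting mutable defaults by
# routing the list through `default_factory`, e.g.
#   chars_to_ignore: List[str] = list_field(default=[",", "?"], metadata={"help": "..."})
# gives every dataclass instance its own copy of the default list.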
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
a_ = field(
default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
a_ = field(
default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
a_ = field(
default=0.1 , metadata={
"""help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
} , )
a_ = field(
default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , )
a_ = field(
default=0.05 , metadata={
"""help""": (
"""Propability of each feature vector along the time axis to be chosen as the start of the vector"""
"""span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
"""vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
)
} , )
a_ = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ = field(
default="""train+validation""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of validation examples to this """
"""value if set."""
)
} , )
a_ = list_field(
default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = 42
a_ = True
a_ = None
a_ = None
a_ = None
a_ = None
def __call__( self : int , lowerCAmelCase_ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
__lowerCAmelCase = [{'input_values': feature['input_values']} for feature in features]
__lowerCAmelCase = [{'input_ids': feature['labels']} for feature in features]
__lowerCAmelCase = self.processor.pad(
lowerCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__lowerCAmelCase = self.processor.pad(
labels=lowerCAmelCase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
__lowerCAmelCase = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 )
__lowerCAmelCase = labels
return batch
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Tuple , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
__lowerCAmelCase = self._prepare_inputs(lowerCAmelCase_ )
if self.use_amp:
with autocast():
__lowerCAmelCase = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ )
else:
__lowerCAmelCase = self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__lowerCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__lowerCAmelCase = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__lowerCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase_ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase_ )
else:
loss.backward()
return loss.detach()
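# Numeric sketch of the scaling above: with gradient_accumulation_steps = 4, each
# micro-batch loss is divided by 4 before backward(), so the four accumulated
# gradients sum to the gradient of the average micro-batch loss rather than 4x it.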
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s', lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__lowerCAmelCase = datasets.load_dataset(
'common_voice', data_args.dataset_config_name, split=data_args.train_split_name )
__lowerCAmelCase = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test' )
# Create and save tokenizer
__lowerCAmelCase = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowerCAmelCase_ : Any ):
__lowerCAmelCase = re.sub(lowerCAmelCase_, '', batch['sentence'] ).lower() + ' '
return batch
__lowerCAmelCase = train_dataset.map(lowerCAmelCase_, remove_columns=['sentence'] )
__lowerCAmelCase = eval_dataset.map(lowerCAmelCase_, remove_columns=['sentence'] )
def extract_all_chars(lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = ' '.join(batch['text'] )
__lowerCAmelCase = list(set(lowerCAmelCase_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, batched=lowerCAmelCase_, batch_size=-1, keep_in_memory=lowerCAmelCase_, remove_columns=train_dataset.column_names, )
__lowerCAmelCase = eval_dataset.map(
lowerCAmelCase_, batched=lowerCAmelCase_, batch_size=-1, keep_in_memory=lowerCAmelCase_, remove_columns=eval_dataset.column_names, )
__lowerCAmelCase = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
__lowerCAmelCase = {v: k for k, v in enumerate(lowerCAmelCase_ )}
__lowerCAmelCase = vocab_dict[' ']
del vocab_dict[" "]
__lowerCAmelCase = len(lowerCAmelCase_ )
__lowerCAmelCase = len(lowerCAmelCase_ )
with open('vocab.json', 'w' ) as vocab_file:
json.dump(lowerCAmelCase_, lowerCAmelCase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = Wav2Vec2CTCTokenizer(
'vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|', )
__lowerCAmelCase = Wav2Vec2FeatureExtractor(
feature_size=1, sampling_rate=1_6000, padding_value=0.0, do_normalize=lowerCAmelCase_, return_attention_mask=lowerCAmelCase_ )
__lowerCAmelCase = Wav2Vec2Processor(feature_extractor=lowerCAmelCase_, tokenizer=lowerCAmelCase_ )
__lowerCAmelCase = Wav2Vec2ForCTC.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer ), )
if data_args.max_train_samples is not None:
__lowerCAmelCase = min(len(lowerCAmelCase_ ), data_args.max_train_samples )
__lowerCAmelCase = train_dataset.select(range(lowerCAmelCase_ ) )
if data_args.max_val_samples is not None:
__lowerCAmelCase = eval_dataset.select(range(data_args.max_val_samples ) )
__lowerCAmelCase = torchaudio.transforms.Resample(4_8000, 1_6000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowerCAmelCase_ : int ):
__lowerCAmelCase , __lowerCAmelCase = torchaudio.load(batch['path'] )
__lowerCAmelCase = resampler(lowerCAmelCase_ ).squeeze().numpy()
__lowerCAmelCase = 1_6000
__lowerCAmelCase = batch['text']
return batch
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
__lowerCAmelCase = eval_dataset.map(
lowerCAmelCase_, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
def prepare_dataset(lowerCAmelCase_ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
__lowerCAmelCase = processor(
audio=batch['speech'], text=batch['target_text'], sampling_rate=batch['sampling_rate'][0] )
batch.update(lowerCAmelCase_ )
return batch
__lowerCAmelCase = train_dataset.map(
lowerCAmelCase_, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=lowerCAmelCase_, num_proc=data_args.preprocessing_num_workers, )
__lowerCAmelCase = eval_dataset.map(
lowerCAmelCase_, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=lowerCAmelCase_, num_proc=data_args.preprocessing_num_workers, )
# Metric
__lowerCAmelCase = datasets.load_metric('wer' )
def compute_metrics(lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = pred.predictions
__lowerCAmelCase = np.argmax(lowerCAmelCase_, axis=-1 )
__lowerCAmelCase = processor.tokenizer.pad_token_id
__lowerCAmelCase = processor.batch_decode(lowerCAmelCase_ )
# we do not want to group tokens when computing the metrics
__lowerCAmelCase = processor.batch_decode(pred.label_ids, group_tokens=lowerCAmelCase_ )
__lowerCAmelCase = wer_metric.compute(predictions=lowerCAmelCase_, references=lowerCAmelCase_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__lowerCAmelCase = DataCollatorCTCWithPadding(processor=lowerCAmelCase_, padding=lowerCAmelCase_ )
# Initialize our Trainer
__lowerCAmelCase = CTCTrainer(
model=lowerCAmelCase_, data_collator=lowerCAmelCase_, args=lowerCAmelCase_, compute_metrics=lowerCAmelCase_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__lowerCAmelCase = model_args.model_name_or_path
else:
__lowerCAmelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
)
__lowerCAmelCase = min(lowerCAmelCase_, len(lowerCAmelCase_ ) )
trainer.log_metrics('train', lowerCAmelCase_ )
trainer.save_metrics('train', lowerCAmelCase_ )
trainer.save_state()
# Evaluation
__lowerCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCAmelCase_ )
__lowerCAmelCase = min(lowerCAmelCase_, len(lowerCAmelCase_ ) )
trainer.log_metrics('eval', lowerCAmelCase_ )
trainer.save_metrics('eval', lowerCAmelCase_ )
return results
if __name__ == "__main__":
main()
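# Example invocation (script filename and argument values are hypothetical):
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr-tr \
#       --do_train --do_eval --fp16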
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :Dict = RobertaPreLayerNormConfig.from_pretrained(
lowerCAmelCase_ , architectures=['''RobertaPreLayerNormForMaskedLM'''])
# convert state_dict
lowerCamelCase :Union[str, Any] = torch.load(hf_hub_download(repo_id=lowerCAmelCase_ , filename='''pytorch_model.bin'''))
lowerCamelCase :Optional[int] = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('''roberta.'''):
lowerCamelCase :Optional[Any] = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''') :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''') or tensor_key.endswith('''.self.LayerNorm.bias'''):
continue
lowerCamelCase :Any = tensor_value
lowerCamelCase :str = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCAmelCase_ , config=lowerCAmelCase_ , state_dict=lowerCAmelCase_)
model.save_pretrained(lowerCAmelCase_)
# convert tokenizer
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_)
tokenizer.save_pretrained(lowerCAmelCase_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A__ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
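# Example invocation (output path is hypothetical; the repo id comes from the help
# text above):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-m0.40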
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_snake_case : Any = logging.get_logger(__name__)
_snake_case : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : Optional[Any] = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_snake_case : str = {
'yjernite/retribert-base-uncased': 512,
}
_snake_case : Optional[int] = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = RetriBertTokenizer
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : str="[UNK]" , lowerCAmelCase_ : Optional[Any]="[SEP]" , lowerCAmelCase_ : List[str]="[PAD]" , lowerCAmelCase_ : Optional[int]="[CLS]" , lowerCAmelCase_ : List[Any]="[MASK]" , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : List[Any] , ) -> Dict:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(lowerCAmelCase_ , normalizer_state.pop('type' ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**lowerCAmelCase_ )
__lowerCAmelCase = do_lower_case
def lowercase ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int]=None ) -> Optional[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
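# Usage sketch (assuming the class above is exported as RetriBertTokenizerFast):
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   enc = tokenizer("How are you?", return_tensors="pt")
#   # `enc` then holds "input_ids" and "attention_mask", the model_input_names above.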
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
snake_case : int = len(lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
for j in range(i + 1 ,lowerCAmelCase_ ):
if numbers[j] < numbers[i]:
numbers[i], numbers[j] = numbers[j], numbers[i]
return numbers
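# Worked example: exchange_sort([3, 1, 2]) compares every pair (i, j) with j > i and
# swaps out-of-order elements in place, yielding [1, 2, 3]; the nested loops make the
# algorithm O(n^2), comparable to selection sort.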
if __name__ == "__main__":
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Dict = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_snake_case : Union[str, Any] = imread(R'digital_image_processing/image_data/lena_small.jpg')
_snake_case : Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a_ ( ):
__lowerCAmelCase = cn.convert_to_negative(lowerCAmelCase_ )
# assert negative_img array for at least one True
assert negative_img.any()
def a_ ( ):
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowerCAmelCase_, 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def a_ ( ):
__lowerCAmelCase = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a_ ( ):
__lowerCAmelCase = imread('digital_image_processing/image_data/lena_small.jpg', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__lowerCAmelCase = canny.canny(lowerCAmelCase_ )
# assert canny array for at least one True
assert canny_array.any()
def a_ ( ):
assert gg.gaussian_filter(lowerCAmelCase_, 5, sigma=0.9 ).all()
def a_ ( ):
# laplace diagonals
__lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__lowerCAmelCase = conv.img_convolve(lowerCAmelCase_, lowerCAmelCase_ ).astype(lowerCAmelCase_ )
assert res.any()
def a_ ( ):
assert med.median_filter(lowerCAmelCase_, 3 ).any()
def a_ ( ):
__lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(lowerCAmelCase_ )
assert grad.any() and theta.any()
def a_ ( ):
__lowerCAmelCase = sp.make_sepia(lowerCAmelCase_, 20 )
assert sepia.all()
def a_ ( lowerCAmelCase_ : str = "digital_image_processing/image_data/lena_small.jpg" ):
__lowerCAmelCase = bs.Burkes(imread(lowerCAmelCase_, 1 ), 120 )
burkes.process()
assert burkes.output_img.any()
def a_ ( lowerCAmelCase_ : str = "digital_image_processing/image_data/lena_small.jpg", ):
__lowerCAmelCase = rs.NearestNeighbour(imread(lowerCAmelCase_, 1 ), 400, 200 )
nn.process()
assert nn.output.any()
def a_ ( ):
__lowerCAmelCase = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
__lowerCAmelCase = imread(lowerCAmelCase_, 0 )
# Test for get_neighbors_pixel function() return not None
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = image[x_coordinate][y_coordinate]
__lowerCAmelCase = lbp.get_neighbors_pixel(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
__lowerCAmelCase = lbp.local_binary_value(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
assert lbp_image.any()
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
UpperCamelCase = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
UpperCamelCase = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def lowerCamelCase_ ( _lowercase , _lowercase ) -> Tuple:
with open(lowerCAmelCase_ , "r" , encoding="utf-8" ) as f:
__A : Dict = json.loads(f.read() )
__A : List[Any] = collections.OrderedDict()
__A : Tuple = collections.OrderedDict()
__A : List[Any] = collections.OrderedDict()
with open(lowerCAmelCase_ , "r" , encoding="utf-8" ) as f:
__A : List[Any] = f.readlines()
__A : Dict = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowerCAmelCase_ ):
__A : Tuple = b
__A : List[Any] = idx
for wd in b:
__A : int = idx
return vocab, raw_vocab, ids_to_tokens, emoji
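# File-format sketch (inferred from the parsing above, not from official docs): each
# vocab line is either a single token or a comma-separated group of surface forms
# that share one id, e.g. a line "こんにちは,コンニチハ" maps both spellings to the same
# index, while the emoji file is a plain JSON object loaded verbatim.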
class _a ( _UpperCamelCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = VOCAB_FILES_NAMES
lowerCamelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|startoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
super().__init__(
unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(
F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(
F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
__A : Any = do_clean_text
__A , __A , __A , __A : Union[str, Any] = load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_ )
__A : Union[str, Any] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __UpperCAmelCase( self ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __UpperCAmelCase( self ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __UpperCAmelCase( self , __UpperCAmelCase ):
return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text )
def __UpperCAmelCase( self , __UpperCAmelCase ):
return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token ) )
def __UpperCAmelCase( self , __UpperCAmelCase ):
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_ )
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : List[Any] = "".join(lowerCAmelCase_ ).strip()
return out_string
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) + [self.eos_token_id] )
if len(lowerCAmelCase_ ) > self.model_max_length:
__A : Dict = input_ids[-self.model_max_length :]
return input_ids
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None ):
__A : Tuple = 0
if os.path.isdir(lowerCAmelCase_ ):
__A : Dict = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__A : int = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
__A : Optional[int] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
__A : Tuple = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase_ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
__A : List[str] = token_index
writer.write(",".join(lowerCAmelCase_ ) + "\n" )
index += 1
with open(lowerCAmelCase_ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , lowerCAmelCase_ )
return vocab_file, emoji_file
class _a ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__A : Optional[int] = vocab # same as swe
__A : Tuple = ids_to_tokens # same as bpe
__A : int = emoji
__A : int = np.max([len(lowerCAmelCase_ ) for w in self.vocab.keys()] )
__A : str = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
__A : Union[str, Any] = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
__A : Tuple = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
__A : List[Any] = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
__A : Any = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
__A : Any = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
__A : Union[str, Any] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
__A : str = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
__A : Any = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : List[str] = self.content_repattera.sub("<URL>" , lowerCAmelCase_ )
__A : Dict = self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_ )
__A : Optional[int] = self.content_repattera.sub("<TEL>" , lowerCAmelCase_ )
__A : Tuple = self.content_repattera.sub("<DATE>" , lowerCAmelCase_ )
__A : List[str] = self.content_repattera.sub("<DATE>" , lowerCAmelCase_ )
__A : Tuple = self.content_repattera.sub("<PRICE>" , lowerCAmelCase_ )
__A : List[str] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__A : List[str] = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase=False ):
__A : Dict = text.replace(" " , "<SP>" )
__A : int = text.replace(" " , "<SP>" )
__A : str = text.replace("\r\n" , "<BR>" )
__A : int = text.replace("\n" , "<BR>" )
__A : str = text.replace("\r" , "<BR>" )
__A : Dict = text.replace("\t" , "<TAB>" )
__A : Union[str, Any] = text.replace("—" , "ー" )
__A : List[str] = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
__A : Dict = text.replace(lowerCAmelCase_ , lowerCAmelCase_ )
if clean:
__A : Dict = self.clean_text(lowerCAmelCase_ )
def check_simbol(__UpperCAmelCase ):
__A : Any = x.encode()
if len(lowerCAmelCase_ ) == 1 and len(lowerCAmelCase_ ) == 2:
__A : Dict = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(__UpperCAmelCase ):
__A : Any = x.encode()
if len(lowerCAmelCase_ ) == 1 and len(lowerCAmelCase_ ) == 3:
__A : Tuple = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE28080 and c <= 0XE2B07F:
return True
return False
__A : List[Any] = 0
__A : int = []
while pos < len(lowerCAmelCase_ ):
__A : int = min(len(lowerCAmelCase_ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
__A : Any = [] # (token_id, token, pos)
for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1 ):
__A : Optional[int] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase_ ) > 2:
__A : List[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowerCAmelCase_ ) > 0:
# the smallest token_id is adopted
__A , __A , __A : List[str] = sorted(lowerCAmelCase_ , key=lambda __UpperCAmelCase : x[0] )[0]
result.append(lowerCAmelCase_ )
__A : Union[str, Any] = e
else:
__A : Any = pos + 1
__A : str = text[pos:end]
if check_simbol(lowerCAmelCase_ ):
result.append("<KIGOU>" )
elif checkuae(lowerCAmelCase_ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
__A : Optional[Any] = end
return result
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase="\n" ):
__A : List[str] = []
__A : Union[str, Any] = []
__A : Union[str, Any] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase_ ) > 0:
words.append(bytearray(lowerCAmelCase_ ).decode("utf-8" , errors="replace" ) )
__A : Union[str, Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(lowerCAmelCase_ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
words.append(bytearray(lowerCAmelCase_ ).decode("utf-8" , errors="replace" ) )
__A : Optional[int] = "".join(lowerCAmelCase_ )
return text
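# Byte-fallback sketch: during tokenization, text that matches no vocabulary entry is
# emitted as one "<|byte%d|>" token per UTF-8 byte; the id-to-token conversion above
# buffers consecutive byte tokens in a bytearray and decodes them together, so
# arbitrary strings round-trip even through an incomplete vocabulary.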
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = ["""pixel_values"""]
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
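# Usage sketch (the processor class name is obfuscated above; any instance works the
# same way):
#   processor = <ImageProcessorClass>()   # defaults: resize shortest edge, then 224x224 crop
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape           # -> (1, 3, 224, 224), per the defaults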
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCamelCase : Optional[int] = 16
__UpperCamelCase : Tuple = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
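# Illustrative launch sketch (assumed workflow, not part of the original file):
# the script is meant to be run through the `accelerate` CLI, e.g.
#
#   accelerate launch checkpointing_script.py --output_dir ./checkpoints --num_epochs 2
#   accelerate launch checkpointing_script.py --output_dir ./checkpoints \
#       --resume_from_checkpoint ./checkpoints/epoch_0
#
# The second invocation exercises the resume-and-verify path in `training_function`.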
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
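# Illustrative usage sketch mirroring the integration tests above (the checkpoint
# name comes from the tests; the rest is a minimal example, not original code):
#
#   import torch
#   from transformers import MraModel
#
#   model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#   with torch.no_grad():
#       hidden = model(torch.arange(256).unsqueeze(0)).last_hidden_state
#   print(hidden.shape)  # torch.Size([1, 256, 768])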
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from flax.jax_utils import replicate
    from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
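# Illustrative post-processing sketch (an assumption, not part of the original
# tests): converting the pipeline's float images in [0, 1] back to PIL images.
#
#   import numpy as np
#   from PIL import Image
#
#   images = np.asarray(images)  # shape (num_images, height, width, 3)
#   pil_images = [Image.fromarray((img * 255).round().astype("uint8")) for img in images]
#   pil_images[0].save("sample.png")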
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file"""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
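# Illustrative invocation sketch (script filename and paths are placeholders,
# not from the original file; the flags match the argparse setup above):
#
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt/checkpoint_dir \
#       --pytorch_dump_folder_path ./biogpt-converted
#
# The checkpoint dir is expected to contain `checkpoint.pt`, `dict.txt`, and
# `bpecodes`, as checked in `convert_biogpt_checkpoint_to_pytorch` above.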
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
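# Illustrative usage sketch (assumed setup; the `data_dir` path is a placeholder):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = SquadDataTrainingArguments(model_type="bert", data_dir="/path/to/squad")
#   dataset = SquadDataset(data_args, tokenizer, mode="train")
#   print(len(dataset), dataset[0]["input_ids"].shape)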
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably
    with the other models in the library keeping the same API.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """
        Empty init weights function to ensure compatibility of the class in the library.
        """
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
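# Illustrative usage sketch (assumed config values, not part of the original file):
#
#   import torch
#   from transformers.models.timm_backbone import TimmBackboneConfig
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#   print([f.shape for f in feature_maps])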
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """
    Constructs a LeViT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
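# Illustrative direct call of `preprocess` (a minimal sketch; the synthetic input
# array and the restored class name are assumptions, not from the original file):
#
#   import numpy as np
#
#   processor = LevitImageProcessor()
#   image = np.random.randint(0, 256, size=(3, 300, 300), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)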
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Determine whether a two-dimensional polygon can exist with the given side
    lengths: the longest side must be strictly shorter than the sum of the rest.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
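# Illustrative calls (a minimal sketch, consistent with the checks above):
#
#   check_polygon([6, 10, 5])     # True:  10 < 6 + 5
#   check_polygon([3, 7, 13, 2])  # False: 13 >= 3 + 7 + 2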
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
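# A minimal sketch of the lazy-import idea used above (this is NOT the real
# transformers._LazyModule; the class name and internals here are illustrative
# assumptions): attribute access triggers importlib.import_module on first use
# and caches the result on the module object.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        submodule = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value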
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w): the backbone downsamples by a total stride of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason='RegNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
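# Running these tests (command illustrative; the exact path depends on the
# checkout layout):
#   RUN_SLOW=1 python -m pytest tests/models/regnet/test_modeling_flax_regnet.py
# The @slow integration test above only executes when RUN_SLOW is set.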
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act='silu',
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViT does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViT does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            'help': "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}
    )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ['csv', 'json', 'txt'], '`train_file` should be a csv, a json or a txt file.'
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ['csv', 'json', 'txt'], '`validation_file` should be a csv, a json or a txt file.'
def add_chinese_references(dataset, ref_file):
    with open(ref_file, 'r', encoding='utf-8') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if 'validation' not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[:{data_args.validation_split_percentage}%]',
            )
            datasets['train'] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[{data_args.validation_split_percentage}%:]',
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        if extension == 'txt':
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
        )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets['train'] if training_args.do_train else None,
        eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt')
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w') as writer:
                logger.info('***** Train results *****')
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f'  {key} = {value}')
                    writer.write(f'{key} = {value}\n')
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in sorted(results.items()):
                    logger.info(f'  {key} = {value}')
                    writer.write(f'{key} = {value}\n')
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """A queue with a fixed set of priorities: 0 is served first, then 1, then 2."""

    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """A queue where the element itself is the priority: smaller values are served first."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
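# Alternative sketch using only the standard library (an addition, not part of
# the original module): heapq keeps the smallest element at the front, making
# dequeue O(log n) instead of ElementPriorityQueue's O(n) min()/remove().
import heapq


def heap_priority_queue_demo() -> None:
    heap = []
    for value in (10, 70, 100, 1, 5):
        heapq.heappush(heap, value)
    while heap:
        print(heapq.heappop(heap))  # prints 1, 5, 10, 70, 100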
def solution(n: int = 200_0000) -> int:
    """Return the sum of all primes below n, via a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = assumed prime, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
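# Example invocation (directories illustrative): from the repository root,
#   python ./utils/get_modified_files.py utils src tests examples
# prints something like "src/foo.py tests/test_foo.py" with no trailing
# newline, which the Makefile then feeds to the style checkers.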
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ['train', 'test', 'val']:
            for field in ['source', 'target']:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f'{split}.{field}'), 'w') as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()

    config_parameters_to_change = {
        'image_size': 'sample_size',
        'num_res_blocks': 'layers_per_block',
        'block_channels': 'block_out_channels',
        'down_blocks': 'down_block_types',
        'up_blocks': 'up_block_types',
        'downscale_freq_shift': 'freq_shift',
        'resnet_num_groups': 'norm_num_groups',
        'resnet_act_fn': 'act_fn',
        'resnet_eps': 'norm_eps',
        'num_head_channels': 'attention_head_dim',
    }

    key_parameters_to_change = {
        'time_steps': 'time_proj',
        'mid': 'mid_block',
        'downsample_blocks': 'down_blocks',
        'upsample_blocks': 'up_blocks',
    }

    subfolder = '' if has_file(args.repo_path, 'config.json') else 'unet'

    with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
    if has_file(args.repo_path, 'config.json'):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config['down_block_types'] = [k.replace('UNetRes', '') for k in config['down_block_types']]
        config['up_block_types'] = [k.replace('UNetRes', '') for k in config['up_block_types']]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('.')[0] == key:
                    new_state_dict['.'.join([new_key] + param_key.split('.')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
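    # Example invocation (paths illustrative):
    #   python convert_model.py --repo_path ./my-unet-checkpoint --dump_path ./converted
    # Note that as written the script saves back into --repo_path; --dump_path is
    # parsed above but never read.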
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone='resnet50',
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side')
    def test_initialization(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_model_common_attributes(self):
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_save_load(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tie_model_weights(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
    def test_channels(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.')
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('Safetensors is not supported by timm.')
    def test_can_use_safetensors(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc between x_start and x_end with straight segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
    while i <= 100000:
        print(f'With {i} steps: {line_length(f, -10, 10, i)}')
        i *= 10
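    # Sanity check: the straight segment f(x) = x from 0 to 1 has exact length
    # sqrt(2) ~= 1.41421, and the piecewise-linear approximation recovers it
    # for any number of steps.
    print(line_length(lambda x: x, 0, 1, 100))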
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')
    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.'
    )
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
        'System RAM': f'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([f'- {prop}: {val}' for prop, val in info.items()]))
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f'\t{accelerate_config}'
    )
    print(accelerate_config_str)
    info["`Accelerate` configs"] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCamelCase = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 520
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
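# Launch helper that spawns a single training script across multiple TPU cores
# with torch_xla, the TPU counterpart of `python -m torch.distributed.launch`.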
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ))

    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')

    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ))

    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
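# With no gates applied the qubit never leaves |0>, so all shots should fall
# into state '0', e.g. {'0': 1000}.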
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features)

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels)

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg')
    image = Image.open(filepath).convert('RGB')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
# Simplified DES (S-DES): an 8-bit teaching cipher. Two 8-bit round keys are
# derived from a 10-bit key; each round applies expansion, XOR with the round
# key, two 4x4 S-boxes and a P4 permutation.
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input('Enter 10 bit key: ')
    message = input('Enter 8 bit message: ')

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption (round keys applied in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
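# Tests for reading plain-text files into `Dataset`/`DatasetDict` objects via
# `TextDatasetReader` (the backend of `load_dataset("text", ...)`).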
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        embed_dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size])

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
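# Converts self-supervised DINO ViT checkpoints (facebookresearch/dino on torch
# hub) into the HuggingFace ViT format and verifies the outputs match.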
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main', model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
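    """
    Newton-Laplace formula for the speed of sound in a fluid: v = sqrt(K / rho),
    with bulk modulus K in Pa and density rho in kg/m^3.

    Illustrative doctest (water at roughly 20 degrees C, assuming density 998
    kg/m^3 and bulk modulus 2.15e9 Pa):

    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 2)
    1467.76
    """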
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
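# `is_safetensors_compatible` should return True only when every PyTorch `.bin`
# weight file has a matching `.safetensors` counterpart (optionally per variant,
# e.g. `fp16`); these tests exercise the matching logic on filename lists.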
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
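# Lazy import structure: the heavy torch model classes are only imported when
# an attribute is first accessed on the module.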
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import math
def jump_search(arr: list, x: int) -> int:
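    """
    Jump search on a sorted list: probe ahead in blocks of about sqrt(n), then
    scan linearly inside the block. Returns the index of x, or -1 if absent.

    >>> jump_search([0, 1, 2, 3, 4, 5], 3)
    3
    >>> jump_search([0, 5, 10, 20], 8)
    -1
    """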
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"""Number {x} is at index {res}""")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
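# Converts a TensorFlow RemBERT checkpoint plus its JSON config into a PyTorch
# state dict that `transformers` can load.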
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
def check_bouncy(n: int) -> bool:
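    """
    A number is "bouncy" (Project Euler 112) when its digits are neither
    monotonically increasing nor monotonically decreasing.

    >>> check_bouncy(6789)
    False
    >>> check_bouncy(-12345)
    False
    >>> check_bouncy(295)
    True
    """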
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers is
    at least `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(f'{solution(99)}')
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
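# Converts original MaskFormer (Swin backbone) checkpoints into the HuggingFace
# MaskFormerForInstanceSegmentation format.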
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            prefix = f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self"
            state_dict[f"{prefix}.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"{prefix}.query.bias"] = in_proj_bias[:dim]
            state_dict[f"{prefix}.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"{prefix}.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"{prefix}.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"{prefix}.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
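# Illustration of the split above (an invented example, not part of the conversion):
# the tiny Swin backbone has embed_dim = 96, so stage 0 uses dim = 96 and the fused
# qkv projection has weight shape (288, 96) and bias shape (288,). Rows 0:96 become
# the query projection, rows 96:192 the key projection, and rows 192:288 the value
# projection.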
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors='pt')
    outputs = model(**inputs)
    print('Logits:', outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
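# Example invocation (illustrative; the script filename and the local checkpoint
# path are assumptions, not taken from the original file):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade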
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that a device_map assigns every attention block to exactly one device."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks))


def get_device_map(n_layers, devices):
    """Evenly split ``n_layers`` layer indices across the given devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
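# A minimal usage sketch (added for illustration, assuming three GPUs):
#
#   >>> get_device_map(n_layers=12, devices=[0, 1, 2])
#   {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
#
# assert_device_map would raise a ValueError if, for instance, layer 11 appeared
# under two devices or were missing from the map entirely.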
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."})
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."})
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        }, )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."}, )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        }, )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        }, )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that dynamically pads the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{'input_values': feature['input_values']} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]
        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors='pt', )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch['labels'] = labels
        return batch
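# Illustration of the -100 masking above (values invented for the example): for a
# padded label batch [[5, 7, 0], [3, 0, 0]] with attention_mask [[1, 1, 0], [1, 0, 0]],
# masked_fill produces [[5, 7, -100], [3, -100, -100]], so the CTC loss ignores the
# padded positions.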
class CTCTrainer(Trainer):
    """Trainer subclass that handles CTC loss reduction and mixed-precision backward passes."""

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['labels'] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        'common_voice', data_args.dataset_config_name, split=data_args.train_split_name)
    eval_dataset = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test')

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch['text'] = re.sub(chars_to_ignore_regex, '', batch['sentence']).lower() + ' '
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=['sentence'])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=['sentence'])

    def extract_all_chars(batch):
        all_text = ' '.join(batch['text'])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names, )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names, )
    vocab_list = list(set(vocab_train['vocab'][0]) | set(vocab_test['vocab'][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict['|'] = vocab_dict[' ']
    del vocab_dict[" "]
    vocab_dict['[UNK]'] = len(vocab_dict)
    vocab_dict['[PAD]'] = len(vocab_dict)
    with open('vocab.json', 'w') as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        'vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|', )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48000, 16000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch['path'])
        batch['speech'] = resampler(speech_array).squeeze().numpy()
        batch['sampling_rate'] = 16000
        batch['target_text'] = batch['text']
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['sampling_rate'])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed = processor(
            audio=batch['speech'], text=batch['target_text'], sampling_rate=batch['sampling_rate'][0])
        batch.update(processed)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )

    # Metric
    wer_metric = datasets.load_metric('wer')

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    return results
if __name__ == "__main__":
main()
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float):
    # normal gradient at the point of incidence on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
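# Derivation of the quadratic above (a worked sketch, added for illustration):
# substituting the line y = m*(x - a) + b into the ellipse y^2 + 4x^2 = 100, with
# m = outgoing_gradient and (a, b) = (point_x, point_y), gives
#   (m^2 + 4) * x^2  +  2m*(b - m*a) * x  +  (b - m*a)^2 - 100  =  0,
# which is exactly quadratic_term, linear_term, and constant_term, since
# b - m*a = point_y - outgoing_gradient * point_x.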
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
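    # Illustration of the segment-id layout above (an invented example): for
    # token_ids_0 = [5, 6] and token_ids_1 = [8, 9, 10], the method returns
    # [0, 0, 0, 0] + [1, 1, 1, 1] = [0, 0, 0, 0, 1, 1, 1, 1] — zeros cover
    # "[CLS] 5 6 [SEP]" and ones cover "8 9 10 [SEP]".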
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """A min-priority queue that also supports updating or removing queued items."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
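# Illustration of the key above (an invented example): with W1 = 1,
# g_function[(0, 0)] = 0 and goal = (19, 19), the consistent heuristic (i = 0)
# scores (0, 0) as 0 + 1 * euclidean((0, 0), (19, 19)) ~= 26.87, while the
# Manhattan heuristic (i = 2) would give 0 + 1 * 38.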
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground() -> list:
    some_list = []
for x in range(1 ,5 ):
for y in range(1 ,6 ):
some_list.append((x, y) )
for x in range(15 ,20 ):
some_list.append((x, 17) )
for x in range(10 ,19 ):
for y in range(1 ,15 ):
some_list.append((x, y) )
# L block
for x in range(1 ,4 ):
for y in range(12 ,19 ):
some_list.append((x, y) )
for x in range(3 ,13 ):
for y in range(16 ,19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),
    (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg", ):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = ["""pixel_values"""]
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
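# Illustrative usage (a minimal sketch, not part of the original file). Names
# are garbled in this dump, so `image_processor` stands for an instance of the
# processor class above and `preprocess` for its BatchFeature-returning method:
#
#   import numpy as np
#   dummy = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
#   batch = image_processor.preprocess(dummy, return_tensors='np')
#   batch['pixel_values'].shape  # -> (1, 3, crop_height, crop_width)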
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=3_6 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : str=5_1_2 , lowerCAmelCase_ : List[str]=1_6 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : List[str]=None , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def lowercase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Any ) -> Union[str, Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowercase ( self : Dict ) -> List[Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = 3_0_0
return config
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , ) -> Tuple:
__lowerCAmelCase = True
__lowerCAmelCase = MraModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> str:
__lowerCAmelCase = MraForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> Any:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = MraForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
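# For multiple choice, each (batch_size, seq_length) input is repeated
# num_choices times, giving (batch_size, num_choices, seq_length) so the model
# produces one logit per choice.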
__lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self : Tuple ) -> Optional[Any]:
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = ()
def lowercase ( self : List[Any] ) -> Optional[Any]:
self.model_tester = MraModelTester(self )
self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=3_7 )
def lowercase ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowercase ( self : Optional[int] ) -> Optional[int]:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = MraModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason='MRA does not output attentions' )
def lowercase ( self : Optional[int] ) -> Tuple:
return
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Optional[Any] ) -> List[str]:
__lowerCAmelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
__lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : int ) -> Optional[int]:
__lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
__lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = 5_0_2_6_5
__lowerCAmelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : Any ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
__lowerCAmelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = 5_0_2_6_5
__lowerCAmelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n    >>> recall_metric = datasets.load_metric(\'recall\')\n    >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n    >>> print(results)\n    {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n    >>> recall_metric = datasets.load_metric(\'recall\')\n    >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n    >>> print(results)\n    {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n    >>> recall_metric = datasets.load_metric(\'recall\')\n    >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n    >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n    >>> print(results)\n    {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n    >>> recall_metric = datasets.load_metric(\'recall\')\n    >>> predictions = [0, 2, 1, 0, 0, 1]\n    >>> references = [0, 1, 2, 0, 1, 2]\n    >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n    >>> print(results)\n    {\'recall\': 0.3333333333333333}\n    >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n    >>> print(results)\n    {\'recall\': 0.3333333333333333}\n    >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n    >>> print(results)\n    {\'recall\': 0.3333333333333333}\n    >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n    >>> print(results)\n    {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def _snake_case ( self : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : str=1 , _lowerCamelCase : Optional[Any]="binary" , _lowerCamelCase : List[str]=None , _lowerCamelCase : str="warn" , ):
'''simple docstring'''
__lowerCamelCase : Any = recall_score(
lowerCAmelCase_ , lowerCAmelCase_ , labels=lowerCAmelCase_ , pos_label=lowerCAmelCase_ , average=lowerCAmelCase_ , sample_weight=lowerCAmelCase_ , zero_division=lowerCAmelCase_ , )
return {"recall": float(lowerCAmelCase_ ) if score.size == 1 else score}
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_snake_case : Union[str, Any] = 2
class Dictionary:
"""A mapping from symbols to consecutive integers (mirrors fairseq's Dictionary)."""
def __init__( self , *, # begin keyword-only arguments
bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ) -> None:
self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.bos_index = self.add_symbol(bos )
self.pad_index = self.add_symbol(pad )
self.eos_index = self.add_symbol(eos )
self.unk_index = self.add_symbol(unk )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s )
self.nspecial = len(self.symbols )
def __eq__( self , other ) -> bool:
return self.indices == other.indices
def __getitem__( self , idx ) -> str:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> int:
return len(self.symbols )
def __contains__( self , sym ) -> bool:
return sym in self.indices
@classmethod
def load( cls , f ) -> "Dictionary":
d = cls()
d.add_from_file(f )
return d
def add_symbol( self , word , n=1 , overwrite=False ) -> int:
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols )
self.indices[word] = idx
self.symbols.append(word )
self.count.append(n )
return idx
def _load_meta( self , lines ) -> int:
return 0
def add_from_file( self , f ) -> None:
if isinstance(f , str ):
try:
with open(f , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f ) )
return
lines = f.readlines()
indices_start_line = self._load_meta(lines )
for line in lines[indices_start_line:]:
try:
line , field = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
overwrite = True
line , field = line.rsplit(' ' , 1 )
else:
overwrite = False
count = int(field )
word = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(word ) )
self.add_symbol(word , n=count , overwrite=overwrite )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
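# The parser above expects fairseq's plain-text dictionary format: one
# "<token> <count>" pair per line, optionally followed by the
# "#fairseq:overwrite" flag, e.g.
#   the 1234567
#   re@@ 4321 #fairseq:overwrite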
def rewrite_dict_keys( d ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
da = dict((re.sub(R'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(R'$', '</w>', k ), v) for k, v in d.items() )
keep_keys = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
da[k] = d[k] # restore
return da
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
# prep
if not os.path.exists(lowerCAmelCase_ ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(lowerCAmelCase_, exist_ok=lowerCAmelCase_ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'checkpoint.pt' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
__lowerCAmelCase = torch.load(lowerCAmelCase_, map_location='cpu' )
__lowerCAmelCase = chkpt['cfg']['model']
# dicts
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'dict.txt' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
__lowerCAmelCase = Dictionary.load(lowerCAmelCase_ )
__lowerCAmelCase = rewrite_dict_keys(src_dict.indices )
__lowerCAmelCase = len(lowerCAmelCase_ )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# merges_file (bpecodes)
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'bpecodes' )
if not os.path.isfile(lowerCAmelCase_ ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
__lowerCAmelCase = os.path.join(lowerCAmelCase_, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowerCAmelCase_, lowerCAmelCase_ )
# model config
__lowerCAmelCase = os.path.join(lowerCAmelCase_, 'config.json' )
__lowerCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# tokenizer config
__lowerCAmelCase = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, ensure_ascii=lowerCAmelCase_, indent=lowerCAmelCase_ ) )
# model
__lowerCAmelCase = chkpt['model']
# remove unneeded keys
__lowerCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
__lowerCAmelCase = model_state_dict.pop(lowerCAmelCase_ )
else:
__lowerCAmelCase = model_state_dict.pop(lowerCAmelCase_ )
__lowerCAmelCase = BioGptConfig.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = BioGptForCausalLM(lowerCAmelCase_ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase_ )
# save
__lowerCAmelCase = os.path.join(lowerCAmelCase_, lowerCAmelCase_ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(lowerCAmelCase_, lowerCAmelCase_ )
print('Conversion is done!' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
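# Example invocation (the script name and both paths are placeholders):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/fairseq_biogpt_dump \
#       --pytorch_dump_folder_path /path/to/hf_output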
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
a_ = """pixel_values"""
a_ = False
a_ = TimmBackboneConfig
def __init__( self : Tuple , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
requires_backends(self , 'timm' )
super().__init__(lowerCAmelCase_ )
__lowerCAmelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCAmelCase_ , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
pretrained = getattr(config , 'use_pretrained_backbone' , None )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCAmelCase = config.out_indices if getattr(lowerCAmelCase_ , 'out_indices' , lowerCAmelCase_ ) is not None else (-1,)
__lowerCAmelCase = timm.create_model(
config.backbone , pretrained=lowerCAmelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCAmelCase_ , **lowerCAmelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
self._return_layers = self._backbone.return_layers
self._all_layers = {layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : int , lowerCAmelCase_ : Dict , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
config = kwargs.pop('config' , TimmBackboneConfig() )
use_timm = kwargs.pop('use_timm_backbone' , True )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__lowerCAmelCase = kwargs.pop('num_channels' , config.num_channels )
__lowerCAmelCase = kwargs.pop('features_only' , config.features_only )
__lowerCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__lowerCAmelCase = kwargs.pop('out_indices' , config.out_indices )
__lowerCAmelCase = TimmBackboneConfig(
backbone=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , features_only=lowerCAmelCase_ , use_pretrained_backbone=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , )
return super()._from_config(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : int ) -> Dict:
pass
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Dict ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCAmelCase = self._all_layers
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = self._return_layers
__lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = None
__lowerCAmelCase = tuple(lowerCAmelCase_ )
__lowerCAmelCase = tuple(lowerCAmelCase_ ) if hidden_states is not None else None
if not return_dict:
__lowerCAmelCase = (feature_maps,)
if output_hidden_states:
__lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , attentions=lowerCAmelCase_ )
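# A minimal sketch of the timm mechanics this wrapper builds on (assumes the
# `timm` package is installed; 'resnet18' is only an example model name):
#
#   import timm
#   backbone = timm.create_model('resnet18', pretrained=False, features_only=True, out_indices=(-1,))
#   feature_maps = backbone(pixel_values)  # one tensor per requested index,
#   # which forward() above repackages into a BackboneOutput.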
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase__ ( _a : List[str] ):
if isinstance(lowerCAmelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
snake_case_ : Any = [image]
snake_case_ : Union[str, Any] = [trans(img.convert("RGB" ) ) for img in image]
snake_case_ : List[str] = torch.stack(lowerCAmelCase_ )
return image
class UpperCAmelCase_ ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
super().__init__()
# make sure scheduler can always be converted to DDIM
snake_case_ : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
# get the original timestep using init_timestep
snake_case_ : Optional[int] = min(int(num_inference_steps * strength ) , lowerCAmelCase_ )
snake_case_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
snake_case_ : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
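# Worked example: with num_inference_steps=50 and strength=0.8,
# init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 50 - 40 = 10, so
# denoising runs over the last 40 scheduler timesteps; a higher strength adds
# more noise and regenerates more of the input image.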
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Any:
if not isinstance(lowerCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase_ )}''' )
snake_case_ : List[str] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
snake_case_ : Dict = init_latents.shape
snake_case_ : str = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
# get latents
print("add noise to latents at timestep" , lowerCAmelCase_ )
snake_case_ : str = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ : Optional[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0.8 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(lowerCAmelCase_ )
# 2. Preprocess image
snake_case_ : Union[str, Any] = preprocess(lowerCAmelCase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCAmelCase_ , device=self.device )
snake_case_ , snake_case_ : Optional[int] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , self.device )
snake_case_ : Optional[Any] = timesteps[:1].repeat(lowerCAmelCase_ )
# 4. Prepare latent variables
snake_case_ : Union[str, Any] = self.prepare_latents(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.unet.dtype , self.device , lowerCAmelCase_ )
snake_case_ : List[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCAmelCase_ ):
# 1. predict noise model_output
snake_case_ : Optional[int] = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case_ : List[str] = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , eta=lowerCAmelCase_ , use_clipped_model_output=lowerCAmelCase_ , generator=lowerCAmelCase_ , ).prev_sample
snake_case_ : Any = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Tuple = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCAmelCase_ )
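# Illustrative call (a sketch; `unet` and `scheduler` must be compatible
# pretrained components, which is assumed here; parameter names are garbled in
# the signature above, so arguments are shown positionally as (image, strength)):
#
#   from PIL import Image
#   pipe = UpperCAmelCase_(unet, scheduler)
#   result = pipe(Image.open('input.png'), 0.8)
#   result.images[0].save('edited.png')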
from __future__ import annotations
def a_ ( lowerCAmelCase_ : list[float] ):
if len(lowerCAmelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
__lowerCAmelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
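# Examples: a_([3, 4, 5]) -> True (5 < 3 + 4, so the sides can close a polygon);
#           a_([1, 1, 3]) -> False (3 >= 1 + 1, so they cannot).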
if __name__ == "__main__":
import doctest
doctest.testmod()
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def A_ ( ) -> Optional[Any]:
n = 10
a__ : Optional[int] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
dataset = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCAmelCase_ ) ),
} , features=lowerCAmelCase_ , )
return dataset
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ ) -> str:
a__ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCAmelCase_ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> int:
a__ : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt'
a__ : Union[str, Any] = FILE_CONTENT
with open(lowerCAmelCase_ , 'w' ) as f:
f.write(lowerCAmelCase_ )
return filename
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Any:
import bz2
a__ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
a__ : Union[str, Any] = bytes(lowerCAmelCase_ , 'utf-8' )
with bz2.open(lowerCAmelCase_ , 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> List[Any]:
import gzip
a__ : List[str] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
a__ : Tuple = bytes(lowerCAmelCase_ , 'utf-8' )
with gzip.open(lowerCAmelCase_ , 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Any:
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
a__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
a__ : Optional[Any] = bytes(lowerCAmelCase_ , 'utf-8' )
with lz4.frame.open(lowerCAmelCase_ , 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ ) -> Tuple:
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
a__ : List[str] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with py7zr.SevenZipFile(lowerCAmelCase_ , 'w' ) as archive:
archive.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ ) -> Any:
import tarfile
a__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCAmelCase_ , 'w' ) as f:
f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> int:
import lzma
a__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
a__ : Tuple = bytes(lowerCAmelCase_ , 'utf-8' )
with lzma.open(lowerCAmelCase_ , 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ ) -> List[Any]:
import zipfile
a__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCAmelCase_ , 'w' ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
a__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
a__ : List[str] = bytes(lowerCAmelCase_ , 'utf-8' )
with zstd.open(lowerCAmelCase_ , 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Any:
a__ : List[str] = tmp_path_factory.mktemp('data' ) / 'file.xml'
a__ : Tuple = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCAmelCase_ , 'w' ) as f:
f.write(lowerCAmelCase_ )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def A_ ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Optional[int]:
a__ : Union[str, Any] = datasets.Dataset.from_dict(lowerCAmelCase_ )
a__ : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Optional[int]:
a__ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlite3.connect(lowerCAmelCase_ ) ) as con:
a__ : Any = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> str:
a__ : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCAmelCase_ , 'w' , newline='' ) as f:
a__ : str = csv.DictWriter(lowerCAmelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> str:
a__ : str = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCAmelCase_ , 'w' , newline='' ) as f:
a__ : str = csv.DictWriter(lowerCAmelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ ) -> Optional[Any]:
import bz2
a__ : Any = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCAmelCase_ , 'rb' ) as f:
a__ : Dict = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(lowerCAmelCase_ , 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ , A__ ) -> Any:
a__ : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCAmelCase_ , 'w' ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ , A__ ) -> Optional[Any]:
a__ : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCAmelCase_ , 'w' ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCAmelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ , A__ ) -> int:
a__ : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCAmelCase_ , 'w' ) as f:
f.write(lowerCAmelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCAmelCase_ ) ) )
f.write(lowerCAmelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> List[str]:
a__ : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
a__ : List[Any] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowerCAmelCase_ , 'wb' ) as f:
a__ : List[Any] = pq.ParquetWriter(lowerCAmelCase_ , schema=lowerCAmelCase_ )
a__ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCAmelCase_ ) )] for k in DATA[0]} , schema=lowerCAmelCase_ )
writer.write_table(lowerCAmelCase_ )
writer.close()
return path
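# Sanity check (a sketch): the file written above round-trips with
#   table = pq.read_table(path); assert table.num_rows == len(DATA)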
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> int:
a__ : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
a__ : Tuple = {'data': DATA}
with open(lowerCAmelCase_ , 'w' ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Tuple:
a__ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
a__ : Any = {'data': DATA_DICT_OF_LISTS}
with open(lowerCAmelCase_ , 'w' ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> List[Any]:
a__ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCAmelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> str:
a__ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCAmelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Any:
a__ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCAmelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCAmelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ ) -> Tuple:
a__ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCAmelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCAmelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def A_ ( A__ , A__ ) -> Tuple:
import gzip
a__ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCAmelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCAmelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session')
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
    with open(jsonl_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope='session')
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope='session')
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(zip_jsonl_path, arcname=os.path.join('nested', os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope='session')
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.join('main_dir', os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join('main_dir', os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope='session')
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope='session')
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(tar_jsonl_path, arcname=os.path.join('nested', os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope='session')
def text_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def text2_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def abc_file(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data') / 'dataset.abc'
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope='session')
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.join('main_dir', os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join('main_dir', os.path.basename(text2_path)))
    return path


@pytest.fixture(scope='session')
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename('unsupported.ext'))
        f.write(text2_path, arcname=os.path.basename('unsupported_2.ext'))
    return path


@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
    return path


@pytest.fixture(scope='session')
def image_file():
    return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg')


@pytest.fixture(scope='session')
def audio_file():
    return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav')


@pytest.fixture(scope='session')
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path


@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')

    (data_dir / 'subdir').mkdir()
    with open(data_dir / 'subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / 'subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden directory
    (data_dir / '.subdir').mkdir()
    with open(data_dir / '.subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / '.subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)

    return data_dir
| 302
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
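# Tester below builds a small RegNetConfig plus random pixel inputs so the
# Flax model tests stay fast and deterministic.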
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act='relu',
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason='RegNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 53
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
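# Vision encoder configuration; the defaults below correspond to the ViT-g/14
# vision tower used by InstructBLIP (hidden size 1408, 39 layers).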
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = 'instructblip_vision_model'

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act='gelu',
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == 'instructblip':
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = 'instructblip_qformer'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == 'instructblip':
            config_dict = config_dict['qformer_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
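# Composite config: bundles the vision, Q-Former, and language-model configs
# into a single InstructBlipConfig, mirroring the structure of BLIP-2.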
class InstructBlipConfig(PretrainedConfig):
    model_type = 'instructblip'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
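# Minimal usage sketch (illustrative, based on the classes above):
#   config = InstructBlipConfig()          # all three sub-configs take defaults
#   assert config.to_dict()['model_type'] == 'instructblip'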
| 20
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", )
            datasets["train"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name.")

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
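# Illustrative invocation (file names and hyper-parameters are placeholders):
#   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
#       --train_file train.txt --train_ref_file ref.txt \
#       --do_train --output_dir ./output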
| 53
| 0
|
def validate_initial_digits(credit_card_number: str) -> bool:
    """Returns True if the credit card number starts with a valid prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
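# Note: the accepted prefixes cover the major issuers, e.g. 4 = Visa,
# 5 = Mastercard, 34/37 = American Express.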
def luhn_validation(credit_card_number: str) -> bool:
    """Returns True if the credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
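# Worked example for "4111111111111111": doubling every second digit from the
# right turns 4,1,1,1,1,1,1,1 into 8,2,2,2,2,2,2,2 (sum 22); the remaining
# eight 1s add 8, so the total is 30 and 30 % 10 == 0, i.e. the number passes.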
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Prints a diagnostic message and returns whether the number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
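# Expected output of the two calls above:
#   4111111111111111 is a valid credit card number.
#   32323 is an invalid credit card number because of its length.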
| 397
|
def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n, via a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
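# For the default limit of 2,000,000 this prints the Project Euler #10 answer:
#   solution() = 142913828922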
| 53
| 0
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check if all items of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first n consecutive integers that each have n unique prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Returns the first member of the first run of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
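# This is Project Euler #47: the first of four consecutive integers with four
# distinct prime factors each is 134043 (= 3 * 7 * 13 * 491), so this prints 134043.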
| 322
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), 'w') as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append('--fp16')
        else:
            testargs.append('--gpus=0')
            testargs.append('--distributed_backend=ddp_cpu')
            testargs.append('--num_processes=2')

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 53
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A__ = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='''all''', project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load('''glue''', '''mrpc''')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('''.''')[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    '''accuracy''': eval_metric['''accuracy'''],
                    '''f1''': eval_metric['''f1'''],
                    '''train_loss''': total_loss.item() / len(train_dataloader),
                    '''epoch''': epoch,
                }, step=epoch, )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    parser.add_argument(
        '''--with_tracking''', action='''store_true''', help='''Whether to load in all available experiment trackers from the environment and use them for logging.''', )
    parser.add_argument(
        '''--project_dir''', type=str, default='''logs''', help='''Location on where to store experiment tracking logs` and relevent project information''', )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 166
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
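# Tester below wires a timm "resnet50" checkpoint into a TimmBackboneConfig
# and produces random pixel inputs for the backbone tests that follow.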
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone='resnet50',
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side')
    def test_initialization(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_model_common_attributes(self):
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_save_load(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tie_model_weights(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
    def test_channels(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.')
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('Safetensors is not supported by timm.')
    def test_can_use_safetensors(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 53
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""", out_features=["""stage1""", """stage2""", """stage3""", """stage4"""])
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config ):
rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct ,old ,new ):
val = dct.pop(old )
dct[new] = val
def read_in_swin_q_k_v(state_dict ,backbone_config ):
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim:, :]
state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
# fmt: on
def read_in_decoder_q_k_v(state_dict ,config ):
# fmt: off
hidden_size = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
# fmt: on
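# Illustration only (toy sizes, not checkpoint values): the two read_in_* helpers
# above slice a fused (3 * hidden, hidden) ``in_proj_weight`` matrix the way
# PyTorch's nn.MultiheadAttention packs it -- rows [:h] are the query projection,
# rows [h:2h] the key projection and rows [-h:] the value projection.
# demo_h = 4
# demo_in_proj = torch.arange(3 * demo_h * demo_h).reshape(3 * demo_h, demo_h)
# demo_q, demo_k, demo_v = demo_in_proj[:demo_h], demo_in_proj[demo_h : 2 * demo_h], demo_in_proj[-demo_h:]
# assert torch.equal(torch.cat([demo_q, demo_k, demo_v]), demo_in_proj)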
def prepare_img():
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url ,stream=True ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name ,checkpoint_path ,pytorch_dump_folder_path ,push_to_hub = False ):
config = get_maskformer_config(model_name )
# load original state_dict
with open(checkpoint_path ,"""rb""" ) as f:
data = pickle.load(f )
state_dict = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict ,src ,dest )
read_in_swin_q_k_v(state_dict ,config.backbone_config )
read_in_decoder_q_k_v(state_dict ,config )
# update to torch tensors
for key, value in state_dict.items():
state_dict[key] = torch.from_numpy(value )
# load 🤗 model
model = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
print(name ,param.shape )
missing_keys, unexpected_keys = model.load_state_dict(state_dict ,strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(unexpected_keys ) == 0, f"""Unexpected keys: {unexpected_keys}"""
# verify results
image = prepare_img()
if "vistas" in model_name:
ignore_index = 65
elif "cityscapes" in model_name:
ignore_index = 65535
else:
ignore_index = 255
reduce_labels = True if """ade""" in model_name else False
image_processor = MaskFormerImageProcessor(ignore_index=ignore_index ,reduce_labels=reduce_labels )
inputs = image_processor(image ,return_tensors="""pt""" )
outputs = model(**inputs )
print("""Logits:""" ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
expected_logits = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,expected_logits ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(f"""nielsr/{model_name}""" )
image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
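# Example invocation (script name and checkpoint path are placeholders; adjust to
# your local setup):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade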
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ):
if subparsers is not None:
parser = subparsers.add_parser('env' )
else:
parser = argparse.ArgumentParser('Accelerate env command' )
parser.add_argument(
'--config_file', default=None, help='The config file to use for the default values in the launching script.' )
if subparsers is not None:
parser.set_defaults(func=env_command )
return parser
def env_command(args ):
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
pt_xpu_available = is_xpu_available()
pt_npu_available = is_npu_available()
accelerate_config = 'Not found'
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(default_config_file ):
accelerate_config = load_config_from_file(args.config_file ).to_dict()
info = {
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'PyTorch XPU available': str(pt_xpu_available ),
'PyTorch NPU available': str(pt_npu_available ),
'System RAM': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
}
if pt_cuda_available:
info['GPU type'] = torch.cuda.get_device_name()
print('\nCopy-and-paste the text below in your GitHub issue\n' )
print('\n'.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
accelerate_config_str = (
'\n'.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(accelerate_config, dict )
else F"""\t{accelerate_config}"""
)
print(accelerate_config_str )
info['`Accelerate` configs'] = accelerate_config
return info
def main():
parser = env_command_parser()
args = parser.parse_args()
env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
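# Typical usage: this module backs the `accelerate env` CLI subcommand, e.g.
#   accelerate env
#   accelerate env --config_file path/to/default_config.yaml
# It prints environment details to copy into GitHub issues.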
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class TFMobileBertModelTester( object ):
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.embedding_size = embedding_size
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = TFMobileBertModel(config=config )
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs )
inputs = [input_ids, input_mask]
result = model(inputs )
result = model(input_ids )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = TFMobileBertForMaskedLM(config=config )
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = TFMobileBertForNextSentencePrediction(config=config )
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = TFMobileBertForPreTraining(config=config )
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = TFMobileBertForSequenceClassification(config=config )
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_choices = self.num_choices
model = TFMobileBertForMultipleChoice(config=config )
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = TFMobileBertForTokenClassification(config=config )
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = TFMobileBertForQuestionAnswering(config=config )
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def setUp( self ):
self.model_tester = TFMobileBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_mobilebert_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
def test_for_masked_lm( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
def test_for_next_sequence_prediction( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
def test_for_pretraining( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
def test_for_question_answering( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
model = TFMobileBertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_tf
class TFMobileBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def test_inference_masked_lm( self ):
model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
expected_shape = [1, 6, 30522]
self.assertEqual(output.shape , expected_shape )
expected_slice = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
parser = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script', type=str, help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
), )
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER )
return parser.parse_args()
def main():
args = parse_args()
# Import training_script as a module.
script_fpath = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
mod_name = script_fpath.stem
mod = importlib.import_module(mod_name )
# Patch sys.argv
sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
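# Example invocation (the training script and its flags are illustrative):
#   python xla_spawn.py --num_cores 8 your_training_script.py --arg1 --arg2
# The launcher imports the script as a module and spawns its `_mp_fn` on each TPU core.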
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
@require_torch
def test_small_model_pt( self ):
'''simple docstring'''
audio_classifier = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
dataset = load_dataset('''ashraq/esc50''' )
audio = dataset['''train''']['''audio'''][-1]['''array''']
output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(output ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def test_small_model_tf( self ):
'''simple docstring'''
pass
@slow
@require_torch
def test_large_model_pt( self ):
'''simple docstring'''
audio_classifier = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
dataset = load_dataset('''ashraq/esc50''' )
audio = dataset['''train''']['''audio'''][-1]['''array''']
output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(output ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
output = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(output ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
output = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(output ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def test_large_model_tf( self ):
'''simple docstring'''
pass
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.out_features = out_features
self.num_labels = num_labels
self.scope = scope
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_backbone_config( self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def get_config( self ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )
def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
model = UperNetForSemanticSegmentation(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
pipeline_model_mapping = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_torchscript = False
has_attentions = False
def setUp( self ):
self.model_tester = UperNetModelTester(self )
self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
return
def test_forward_signature( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_for_semantic_segmentation( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def test_inputs_embeds( self ):
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def test_model_common_attributes( self ):
pass
@unittest.skip(reason='UperNet does not have a base model' )
def test_save_load_fast_init_from_base( self ):
pass
@unittest.skip(reason='UperNet does not have a base model' )
def test_save_load_fast_init_to_base( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def test_multi_gpu_data_parallel_forward( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def test_model_is_small( self ):
pass
def test_hidden_states_output( self ):
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def test_initialization( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def test_tied_model_weights_key_ignore( self ):
pass
@slow
def test_model_from_pretrained( self ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = UperNetForSemanticSegmentation.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img():
filepath = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg' )
image = Image.open(filepath ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
def test_inference_swin_backbone( self ):
processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(torch_device )
image = prepare_img()
inputs = processor(images=image , return_tensors='pt' ).to(torch_device )
with torch.no_grad():
outputs = model(**inputs )
expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
def test_inference_convnext_backbone( self ):
processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(torch_device )
image = prepare_img()
inputs = processor(images=image , return_tensors='pt' ).to(torch_device )
with torch.no_grad():
outputs = model(**inputs )
expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = ProphetNetTokenizer
test_rust_tokenizer = False
def setUp( self ):
'''simple docstring'''
super().setUp()
vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def get_input_output_texts( self , tokenizer ):
'''simple docstring'''
input_text = """UNwant\u00E9d,running"""
output_text = """unwanted, running"""
return input_text, output_text
def test_full_tokenizer( self ):
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def test_chinese( self ):
'''simple docstring'''
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def test_basic_tokenizer_lower( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def test_basic_tokenizer_lower_strip_accents_false( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def test_basic_tokenizer_lower_strip_accents_true( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def test_basic_tokenizer_lower_strip_accents_default( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def test_basic_tokenizer_no_lower( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def test_basic_tokenizer_no_lower_strip_accents_false( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def test_basic_tokenizer_no_lower_strip_accents_true( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def test_basic_tokenizer_respects_never_split_tokens( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def test_wordpiece_tokenizer( self ):
'''simple docstring'''
vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
vocab = {}
for i, token in enumerate(vocab_tokens ):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def test_prepare_batch( self ):
'''simple docstring'''
tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
self.assertIsInstance(batch , BatchEncoding )
result = list(batch.input_ids.numpy()[0] )
self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def test_is_whitespace( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def test_is_control( self ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def test_is_punctuation( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def test_sequence_builders( self ):
'''simple docstring'''
tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features ):
assert isinstance(dataset, Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path ):
cache_dir = tmp_path / 'cache'
expected_features = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
_check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def test_dataset_from_text_features(features, text_path, tmp_path ):
cache_dir = tmp_path / 'cache'
default_expected_features = {'text': 'string'}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir ).read()
_check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_text_split(split, text_path, tmp_path ):
cache_dir = tmp_path / 'cache'
expected_features = {'text': 'string'}
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split ).read()
_check_text_dataset(dataset, expected_features )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path ):
if issubclass(path_type, str ):
path = text_path
elif issubclass(path_type, list ):
path = [text_path]
cache_dir = tmp_path / 'cache'
expected_features = {'text': 'string'}
dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
_check_text_dataset(dataset, expected_features )
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",) ):
assert isinstance(dataset_dict, DatasetDict )
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def a_ ( lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Dict ):
__lowerCAmelCase = tmp_path / 'cache'
__lowerCAmelCase = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = TextDatasetReader({'train': text_path}, cache_dir=lowerCAmelCase_, keep_in_memory=lowerCAmelCase_ ).read()
_check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_ )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = tmp_path / 'cache'
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
__lowerCAmelCase = {'text': 'string'}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = TextDatasetReader({'train': text_path}, features=lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read()
_check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_ )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : Optional[int] ):
if split:
__lowerCAmelCase = {split: text_path}
else:
__lowerCAmelCase = 'train'
__lowerCAmelCase = {'train': text_path, 'test': text_path}
__lowerCAmelCase = tmp_path / 'cache'
__lowerCAmelCase = {'text': 'string'}
__lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read()
_check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
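

# Hedged usage sketch, not part of the test suite above: TextDatasetReader is
# the low-level reader behind the public `load_dataset("text", ...)` API. The
# sample file below is created on the fly, so nothing here is a real fixture.
if __name__ == "__main__":
    import tempfile

    from datasets import load_dataset

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("one\ntwo\nthree\nfour\n")
    ds = load_dataset("text", data_files={"train": f.name}, split="train")
    assert ds.column_names == ["text"] and ds.num_rows == 4  # one row per input line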
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        checkpoint = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # saving while still transformed must raise; reversing first makes it legal
        checkpoint = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
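

# Hedged usage sketch outside the test class (assumes `optimum` is installed;
# the checkpoint name is the same tiny test model used above):
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = model.to_bettertransformer()  # swap in fused-kernel modules for inference
    out = model.generate(**tok("This is me", return_tensors="pt"))
    model = model.reverse_bettertransformer()  # restore canonical modules before saving
    print(tok.batch_decode(out, skip_special_tokens=True))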
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
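

# Hedged illustration of the slicing rule above (this helper is ours, defined
# but never called by the conversion): timm fuses query/key/value into one
# (3 * hidden, hidden) matrix, and the converter cuts it into equal thirds.
def _demo_qkv_split():
    import torch

    hidden = 4  # hypothetical toy size, not a real config value
    qkv = torch.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
    assert torch.equal(torch.cat([q, k, v]), qkv)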
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # a standard COCO image used as a smoke-test input for conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # a trailing "8" in the model name means patch size 8
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
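
# Hedged usage sketch: the conversion can also be driven from Python, e.g.
#   convert_vit_checkpoint("dino_vits8", "./dino_vits8_hf", base_model=True)
# where the dump path is a placeholder. A trailing "8" in the model name
# selects patch size 8, and the "vits" variants select the small architecture.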
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    r"""
    Configuration class for a backbone loaded through the timm library.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone  # timm model name, e.g. "resnet50"
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
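

# Hedged usage sketch (our addition; assumes timm is installed and "resnet18"
# is an arbitrary timm model name, not something this config file mandates):
def _demo_timm_backbone_config():
    return TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3))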
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
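

# Hedged sketch of the rule the tests above pin down; this is our simplified
# reimplementation for illustration, not the library function itself. A file
# list is compatible when every ".bin" weight (of the requested variant) has a
# ".safetensors" sibling, where transformers-style "pytorch_model" stems map
# to "model" stems.
def _demo_compat_rule(filenames, variant=None):
    suffix = f".{variant}.bin" if variant else ".bin"
    target = suffix.replace(".bin", ".safetensors")
    for name in (f for f in filenames if f.endswith(suffix)):
        candidates = {
            name.replace(suffix, target),
            name.replace("pytorch_model", "model").replace(suffix, target),
        }
        if not candidates & set(filenames):
            return False
    return True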
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import math
def jump_search(arr: list, x: int) -> int:
    """Return the index of x in the sorted list arr, or -1 if x is absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # jump ahead in sqrt(n)-sized blocks until reaching the block that could contain x
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # linear scan within the identified block
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"""Number {x} is at index {res}""")
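

# Hedged self-checking example (our addition; call it manually if desired).
# Jump search assumes a sorted list and makes O(sqrt(n)) comparisons, sitting
# between a linear scan and binary search.
def _demo_jump_search():
    data = [0, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    assert jump_search(data, 21) == 7
    assert jump_search(data, 4) == -1  # absent values report -1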
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
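
# Hedged usage note (our addition): with the lazy structure above, an import
# such as `from transformers.models.speecht5 import SpeechT5Config` only
# materializes configuration_speecht5; the torch-dependent modeling module is
# loaded on first access rather than at package import time.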
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
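
# Hedged usage sketch (all three paths are placeholders, not real files):
#   convert_rembert_tf_checkpoint_to_pytorch(
#       "/path/to/tf_checkpoint", "/path/to/rembert_config.json", "/path/to/pytorch_model.bin"
#   )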
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
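

# Hedged recap (our helper, not part of the test file): the alignment rule the
# tests above pin down is that out_features and out_indices must name the same
# stages, and that both default to the last stage when left as None.
def _demo_alignment_default():
    out_features, out_indices = get_aligned_output_features_output_indices(None, None, ["stem", "stage1", "stage2"])
    assert out_features == ["stage2"] and out_indices == [2]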
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    config.id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert.',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
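
# Hedged usage sketch (paths are placeholders): the same conversion can be
# invoked from Python, e.g.
#   convert_maskformer_checkpoint(
#       "maskformer-swin-tiny-ade",
#       "/path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl",
#       "/path/to/output_dir",
#       push_to_hub=False,
#   )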
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''distilbert-base-uncased''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
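# A standalone inference sketch mirroring the integration test above (the input ids
# and the expected hidden-state shape come from the test itself):
#
#   model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
#   input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
#   attention_mask = np.ones_like(input_ids)
#   last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
#   print(last_hidden_state.shape)  # (1, 11, 768)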
| 322
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        }, )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."}, )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
                " span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                " vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        }, )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        }, )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{'input_values': feature['input_values']} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]
        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors='pt', )
        # replace padding with -100 so these positions are ignored by the CTC loss
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch['labels'] = labels
        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['labels'] >= 0).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f""" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        'common_voice', data_args.dataset_config_name, split=data_args.train_split_name )
    eval_dataset = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test')
    # Create and save tokenizer
    chars_to_ignore_regex = f"""[{''.join(data_args.chars_to_ignore)}]"""
    def remove_special_characters(batch):
        batch['text'] = re.sub(chars_to_ignore_regex, '', batch['sentence']).lower() + ' '
        return batch
    train_dataset = train_dataset.map(remove_special_characters, remove_columns=['sentence'])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=['sentence'])
    def extract_all_chars(batch):
        all_text = ' '.join(batch['text'])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}
    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names, )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names, )
    vocab_list = list(set(vocab_train['vocab'][0]) | set(vocab_test['vocab'][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict['|'] = vocab_dict[' ']
    del vocab_dict[" "]
    vocab_dict['[UNK]'] = len(vocab_dict)
    vocab_dict['[PAD]'] = len(vocab_dict)
    with open('vocab.json', 'w') as vocab_file:
        json.dump(vocab_dict, vocab_file)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        'vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|', )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    resampler = torchaudio.transforms.Resample(48000, 16000)
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch['path'])
        batch['speech'] = resampler(speech_array).squeeze().numpy()
        batch['sampling_rate'] = 16000
        batch['target_text'] = batch['text']
        return batch
    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['sampling_rate'])) == 1
        ), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
        processed_batch = processor(
            audio=batch['speech'], text=batch['target_text'], sampling_rate=batch['sampling_rate'][0] )
        batch.update(processed_batch)
        return batch
    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    # Metric
    wer_metric = datasets.load_metric('wer')
    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
trainer.save_state()
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
return results
if __name__ == "__main__":
main()
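# A hedged launch example for this script (argument names come from the dataclasses
# above and from TrainingArguments; the model id and language config are placeholders):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-common-voice-demo \
#       --overwrite_output_dir \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16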
| 53
| 0
|
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError('''Invalid input''')
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(10) = }')
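    # solution(n) gives the last n digits of 28433 * 2**7830457 + 1 (Project Euler 97);
    # the three-argument pow works modulo 10**n, so the huge power is never materialized.
    # Sanity check against the widely cited answer for n = 10:
    assert solution(10) == "8739992577"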
| 166
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
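# A hedged usage sketch for the fast tokenizer defined above (checkpoint id taken from
# the pretrained maps in this module; RetriBERT is a legacy/deprecated model, so this
# assumes the weights are still downloadable):
#
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   encoding = tokenizer("How many models are hosted on the Hub?")
#   print(encoding["input_ids"])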
| 53
| 0
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class __lowercase (Pipeline ):
"""simple docstring"""
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self , **A ) -> Any:
        super().__init__(**A )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, """return_all_scores""") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                """`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
                """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""", UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = """top_k""" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
                """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, """function_to_apply""") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["""logits"""][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"""label""": self.model.config.id2label[i], """score""": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
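# A minimal usage sketch of the pipeline implemented above, via the factory function
# (assumes a standard transformers install; the task's default checkpoint is used):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   print(classifier("This is great!"))              # [{'label': ..., 'score': ...}]
#   print(classifier("This is great!", top_k=None))  # all labels, per the deprecation note above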
| 587
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg", ):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
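# These assertions are written for pytest and are typically run from the repository
# root (the test file name is assumed; the imports above pin the package layout):
#
#   pytest digital_image_processing/test_digital_image_processing.py -q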
| 53
| 0
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of a sequence of non-adjacent elements.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([5, 1, 1, 5])
    10
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
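# Complexity note: the rolling pair of values above is the O(n)-time, O(1)-space form
# of the classic "house robber" recurrence, e.g.
# maximum_non_adjacent_sum([1, 2, 3, 4, 5, 6, 7]) == 16 (picking 1 + 3 + 5 + 7).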
if __name__ == "__main__":
import doctest
doctest.testmod()
| 520
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( BaseImageProcessor ):
"""simple docstring"""
a_ = ["""pixel_values"""]
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
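# A hedged end-to-end sketch of the preprocessing defined above (the class name is the
# scrambled one used in this file; np is already imported at the top of this module;
# with the default config the output is a 224x224 center crop):
#
#   image_processor = _UpperCAmelCase()
#   features = image_processor(images=np.zeros((480, 640, 3), dtype=np.uint8), return_tensors="np")
#   print(features["pixel_values"].shape)  # expected: (1, 3, 224, 224)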
| 53
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase__ ( PipelineTesterMixin , unittest.TestCase):
UpperCamelCase_ = ShapEPipeline
UpperCamelCase_ = ["""prompt"""]
UpperCamelCase_ = ["""prompt"""]
UpperCamelCase_ = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase_ = False
@property
def __A ( self : int ):
'''simple docstring'''
return 32
@property
def __A ( self : int ):
'''simple docstring'''
return 32
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 8
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase_ )
@property
def __A ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = PriorTransformer(**lowerCAmelCase_ )
return model
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE : List[Any] = ShapERenderer(**lowerCAmelCase_ )
return model
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.dummy_prior
SCREAMING_SNAKE_CASE : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Dict = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_renderer
SCREAMING_SNAKE_CASE : Optional[Any] = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=lowerCAmelCase_ , clip_sample=lowerCAmelCase_ , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE : Dict = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __A ( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int]=0 ):
'''simple docstring'''
if str(lowerCAmelCase_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''cpu'''
SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = pipe(**self.get_dummy_inputs(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : int ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch_device == '''cpu'''
SCREAMING_SNAKE_CASE : List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase_ , relax_max_difference=lowerCAmelCase_ , )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase_ )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE : List[str] = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE : List[str] = pipe(**lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
SCREAMING_SNAKE_CASE : Optional[int] = ShapEPipeline.from_pretrained('''openai/shap-e''' )
SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : str = pipe(
'''a shark''' , generator=lowerCAmelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
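# A standalone sketch mirroring the slow test above (requires a CUDA GPU and the
# openai/shap-e weights; the expected shape comes from the assertions in the test):
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   images = pipe("a shark", generator=generator, guidance_scale=15.0,
#                 num_inference_steps=64, frame_size=64, output_type="np").images[0]
#   print(images.shape)  # (20, 64, 64, 3)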
| 248
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=3_6 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : str=5_1_2 , lowerCAmelCase_ : List[str]=1_6 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : List[str]=None , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
    def prepare_config_and_inputs(self):
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , ) -> Tuple:
__lowerCAmelCase = True
__lowerCAmelCase = MraModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> str:
__lowerCAmelCase = MraForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> Any:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = MraForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
def lowercase ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowercase ( self : Optional[int] ) -> Optional[int]:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = MraModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason='MRA does not output attentions' )
def lowercase ( self : Optional[int] ) -> Tuple:
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
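# Usage sketch (illustrative, not part of the original file): the defaults
# above match the AST base architecture, so a bare config works for a smoke test.
#
#     config = ASTConfig(num_mel_bins=128, max_length=1024)
#     assert config.model_type == "audio-spectrogram-transformer"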
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format:
        <symbol0> <count0>
        <symbol1> <count1>
        ...
        """
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
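# Sketch of the on-disk format Dictionary.load() expects: one "<token> <count>"
# pair per line (the four specials are added automatically). The file path and
# contents below are made up for illustration.
#
#     with open("/tmp/dict.txt", "w", encoding="utf-8") as fh:
#         fh.write("hello 42\nworld 7\n")
#     d = Dictionary.load("/tmp/dict.txt")
#     assert len(d) == 6  # <s>, <pad>, </s>, <unk> + 2 loaded symbols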
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
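# Quick check of the rewrite above, using the example from the comment plus the
# four special tokens the function expects to find:
#
#     d = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
#     out = rewrite_dict_keys(d)
#     assert out == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "tt": 6, "er</w>": 7}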
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so split the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
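# Non-interactive sketch of the same class. This is a variant of Kadane's
# algorithm: sum_value[i] is the best sum ending at index i, rear[i] the
# running best overall.
#
#     sub = SubArray("1,-2,3,4,-1")
#     assert sub.solve_sub_array() == 7  # the slice [3, 4] wins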
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
a_ = """pixel_values"""
a_ = False
a_ = TimmBackboneConfig
def __init__( self : Tuple , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
requires_backends(self , 'timm' )
super().__init__(lowerCAmelCase_ )
__lowerCAmelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCAmelCase_ , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
__lowerCAmelCase = getattr(lowerCAmelCase_ , 'use_pretrained_backbone' , lowerCAmelCase_ )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCAmelCase = config.out_indices if getattr(lowerCAmelCase_ , 'out_indices' , lowerCAmelCase_ ) is not None else (-1,)
__lowerCAmelCase = timm.create_model(
config.backbone , pretrained=lowerCAmelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCAmelCase_ , **lowerCAmelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCAmelCase = self._backbone.return_layers
__lowerCAmelCase = {layer['module']: str(lowerCAmelCase_ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : int , lowerCAmelCase_ : Dict , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCAmelCase = kwargs.pop('config' , TimmBackboneConfig() )
__lowerCAmelCase = kwargs.pop('use_timm_backbone' , lowerCAmelCase_ )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__lowerCAmelCase = kwargs.pop('num_channels' , config.num_channels )
__lowerCAmelCase = kwargs.pop('features_only' , config.features_only )
__lowerCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__lowerCAmelCase = kwargs.pop('out_indices' , config.out_indices )
__lowerCAmelCase = TimmBackboneConfig(
backbone=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , features_only=lowerCAmelCase_ , use_pretrained_backbone=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , )
return super()._from_config(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : int ) -> Dict:
pass
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Dict ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCAmelCase = self._all_layers
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = self._return_layers
__lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = None
__lowerCAmelCase = tuple(lowerCAmelCase_ )
__lowerCAmelCase = tuple(lowerCAmelCase_ ) if hidden_states is not None else None
if not return_dict:
__lowerCAmelCase = (feature_maps,)
if output_hidden_states:
__lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , attentions=lowerCAmelCase_ )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = StableUnCLIPImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
A : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : List[Any] = frozenset([] )
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
snake_case_ : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
snake_case_ : int = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCAmelCase_ , projection_dim=lowerCAmelCase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
snake_case_ : Any = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase_ )
snake_case_ : Optional[int] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
snake_case_ : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase_ , layers_per_block=1 , upcast_attention=lowerCAmelCase_ , use_linear_projection=lowerCAmelCase_ , )
torch.manual_seed(0 )
snake_case_ : List[Any] = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
snake_case_ : Optional[int] = AutoencoderKL()
snake_case_ : List[str] = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
if str(lowerCAmelCase_ ).startswith("mps" ):
snake_case_ : Tuple = torch.manual_seed(lowerCAmelCase_ )
else:
snake_case_ : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
snake_case_ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
if pil_image:
snake_case_ : Optional[int] = input_image * 0.5 + 0.5
snake_case_ : int = input_image.clamp(0 , 1 )
snake_case_ : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case_ : Optional[Any] = DiffusionPipeline.numpy_to_pil(lowerCAmelCase_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[Any] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableUnCLIPImgaImgPipeline(**lowerCAmelCase_ )
snake_case_ : Any = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
snake_case_ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase_ )
inputs.update({"image_embeds": None} )
snake_case_ : Optional[Any] = sd_pipe(**lowerCAmelCase_ ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : Union[str, Any] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : List[str] = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase_ )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : Optional[int] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase_ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase_ )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
snake_case_ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
snake_case_ : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case_ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ : int = pipe(lowerCAmelCase_ , "anime turle" , generator=lowerCAmelCase_ , output_type="np" )
snake_case_ : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
snake_case_ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
snake_case_ : int = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case_ : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ : Any = pipe(lowerCAmelCase_ , "anime turle" , generator=lowerCAmelCase_ , output_type="np" )
snake_case_ : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
snake_case_ : Dict = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case_ : Optional[Any] = pipe(
lowerCAmelCase_ , "anime turtle" , num_inference_steps=2 , output_type="np" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
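# The two calls used in the tests above are the generic diffusers memory-saving
# pattern and apply to any pipeline; a hedged sketch (model id as in the tests):
#
#     pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
#         "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
#     )
#     pipe.enable_attention_slicing()       # chunk attention to cut peak VRAM
#     pipe.enable_sequential_cpu_offload()  # keep submodules on CPU until used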
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with those side lengths can exist."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
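# Sanity check of the key remapping above on a toy state dict (tensors replaced
# by ints for brevity):
#
#     sd = {"pred_layer.proj.weight": 1, "embeddings.weight": 2}
#     out = {}
#     for k, v in sd.items():
#         out[k if "pred_layer" in k else "transformer." + k] = v
#     assert out == {"pred_layer.proj.weight": 1, "transformer.embeddings.weight": 2}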
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Union[str, Any]=1_0 , lowerCAmelCase_ : List[str]=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple="relu" , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Optional[int]=None , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embeddings_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
__lowerCAmelCase = len(lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = self.get_config()
return config, pixel_values
def lowercase ( self : Tuple ) -> List[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> str:
__lowerCAmelCase = FlaxRegNetModel(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowercase ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ) -> Tuple:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
a_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
a_ = False
a_ = False
a_ = False
def lowercase ( self : Dict ) -> None:
__lowerCAmelCase = FlaxRegNetModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def lowercase ( self : int ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : str ) -> Union[str, Any]:
return
def lowercase ( self : Dict ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowercase ( self : Union[str, Any] ) -> Any:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowercase ( self : Tuple ) -> Tuple:
pass
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
def check_hidden_states_output(lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : str ) -> str:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = model_class(lowerCAmelCase_ )
@jax.jit
def model_jitted(lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Dict ):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ )
with self.subTest('JIT Enabled' ):
__lowerCAmelCase = model_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCAmelCase = model_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
__lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='np' )
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
def matching_min_vertex_cover(graph):
    """Approximate a minimum vertex cover via a maximal matching."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node), add its extremities to chosen_vertices, and then
    # remove all arcs adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph):
    """Return a set of (from_node, to_node) couples representing the graph's edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
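# The DP above implements the classic matrix-chain recurrence: with dimensions
# p[0..n-1] (so A_i is a p[i-1] x p[i] matrix),
#
#     m[a][b] = min_{a <= c < b} ( m[a][c] + m[c+1][b] + p[a-1] * p[c] * p[b] )
#
# sol[a][b] stores the argmin split point c, which print_optimal_solution
# follows recursively to print the parenthesization.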
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A ( __lowercase ):
_snake_case =42
class A ( __lowercase , __lowercase ):
@register_to_config
def __init__( self: Optional[Any] , _lowerCAmelCase: int = 6_5536 , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: int = 2 , _lowerCAmelCase: int = 2 , _lowerCAmelCase: int = 0 , _lowerCAmelCase: str = "fourier" , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = False , _lowerCAmelCase: float = 0.0 , _lowerCAmelCase: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _lowerCAmelCase: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _lowerCAmelCase: Tuple[str] = "UNetMidBlock1D" , _lowerCAmelCase: str = None , _lowerCAmelCase: Tuple[int] = (32, 32, 64) , _lowerCAmelCase: str = None , _lowerCAmelCase: int = 8 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: bool = False , ) -> int:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =sample_size
# time
if time_embedding_type == "fourier":
UpperCAmelCase_ =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_lowerCAmelCase , log=_lowerCAmelCase , flip_sin_to_cos=_lowerCAmelCase )
UpperCAmelCase_ =2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCAmelCase_ =Timesteps(
block_out_channels[0] , flip_sin_to_cos=_lowerCAmelCase , downscale_freq_shift=_lowerCAmelCase )
UpperCAmelCase_ =block_out_channels[0]
if use_timestep_embedding:
UpperCAmelCase_ =block_out_channels[0] * 4
UpperCAmelCase_ =TimestepEmbedding(
in_channels=_lowerCAmelCase , time_embed_dim=_lowerCAmelCase , act_fn=_lowerCAmelCase , out_dim=block_out_channels[0] , )
UpperCAmelCase_ =nn.ModuleList([] )
UpperCAmelCase_ =None
UpperCAmelCase_ =nn.ModuleList([] )
UpperCAmelCase_ =None
# down
UpperCAmelCase_ =in_channels
for i, down_block_type in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =output_channel
UpperCAmelCase_ =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCAmelCase_ =i == len(_lowerCAmelCase ) - 1
UpperCAmelCase_ =get_down_block(
_lowerCAmelCase , num_layers=_lowerCAmelCase , in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_lowerCAmelCase )
# mid
UpperCAmelCase_ =get_mid_block(
_lowerCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_lowerCAmelCase , add_downsample=_lowerCAmelCase , )
# up
UpperCAmelCase_ =list(reversed(_lowerCAmelCase ) )
UpperCAmelCase_ =reversed_block_out_channels[0]
if out_block_type is None:
UpperCAmelCase_ =out_channels
else:
UpperCAmelCase_ =block_out_channels[0]
for i, up_block_type in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =output_channel
UpperCAmelCase_ =(
reversed_block_out_channels[i + 1] if i < len(_lowerCAmelCase ) - 1 else final_upsample_channels
)
UpperCAmelCase_ =i == len(_lowerCAmelCase ) - 1
UpperCAmelCase_ =get_up_block(
_lowerCAmelCase , num_layers=_lowerCAmelCase , in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_lowerCAmelCase )
UpperCAmelCase_ =output_channel
# out
UpperCAmelCase_ =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
UpperCAmelCase_ =get_out_block(
out_block_type=_lowerCAmelCase , num_groups_out=_lowerCAmelCase , embed_dim=block_out_channels[0] , out_channels=_lowerCAmelCase , act_fn=_lowerCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: Union[torch.Tensor, float, int] , _lowerCAmelCase: bool = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
UpperCAmelCase_ =timestep
if not torch.is_tensor(_lowerCAmelCase ):
UpperCAmelCase_ =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ =timesteps[None].to(sample.device )
UpperCAmelCase_ =self.time_proj(_lowerCAmelCase )
if self.config.use_timestep_embedding:
UpperCAmelCase_ =self.time_mlp(_lowerCAmelCase )
else:
UpperCAmelCase_ =timestep_embed[..., None]
UpperCAmelCase_ =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
UpperCAmelCase_ =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
UpperCAmelCase_ =()
for downsample_block in self.down_blocks:
UpperCAmelCase_ , UpperCAmelCase_ =downsample_block(hidden_states=_lowerCAmelCase , temb=_lowerCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCAmelCase_ =self.mid_block(_lowerCAmelCase , _lowerCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
UpperCAmelCase_ =down_block_res_samples[-1:]
UpperCAmelCase_ =down_block_res_samples[:-1]
UpperCAmelCase_ =upsample_block(_lowerCAmelCase , res_hidden_states_tuple=_lowerCAmelCase , temb=_lowerCAmelCase )
# 5. post-process
if self.out_block:
UpperCAmelCase_ =self.out_block(_lowerCAmelCase , _lowerCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_lowerCAmelCase )
from math import log2


def lowest_set_bit_index(a):
    """Return the index (from 0) of the lowest set bit of a non-negative integer,
    i.e. the exponent of the largest power of two dividing it."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
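# Why `a & -a` isolates the lowest set bit: in two's complement, -a flips every
# bit above the lowest 1 and keeps that 1, so the AND leaves only it. E.g.:
#
#     a = 12                      # 0b1100
#     assert a & -a == 4          # 0b0100
#     assert lowest_set_bit_index(12) == 2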
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
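# Sketch of the Q/K/V split above on a dummy fused projection: the fused metaseq
# weight stacks K, V, Q row-wise (in that order, despite the "qkv" name), so
# torch.split recovers them as three equal chunks along dim 0.
#
#     fused = torch.arange(18.0).reshape(9, 2)   # pretend hidden_size is 2
#     k, v, q = torch.split(fused, 9 // 3, dim=0)
#     assert k.shape == v.shape == q.shape == (3, 2)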
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self: Optional[int] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Any="<s>" , _lowerCAmelCase: int="</s>" , _lowerCAmelCase: List[Any]="</s>" , _lowerCAmelCase: List[Any]="<s>" , _lowerCAmelCase: List[str]="<unk>" , _lowerCAmelCase: str="<pad>" , _lowerCAmelCase: Tuple="<mask>" , _lowerCAmelCase: Optional[Dict[str, Any]] = None , **_lowerCAmelCase: Optional[Any] , ) -> None:
'''simple docstring'''
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
UpperCAmelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
UpperCAmelCase_ =vocab_file
UpperCAmelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
UpperCAmelCase_ ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
UpperCAmelCase_ =len(self.sp_model ) - 1
UpperCAmelCase_ ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ =[self.cls_token_id]
UpperCAmelCase_ =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None , _lowerCAmelCase: bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase_ =[self.sep_token_id]
UpperCAmelCase_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
return len(self.sp_model )
def lowerCAmelCase__ ( self: List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ ={self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id =self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens =[]
        out_string =""
        prev_is_special =False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special =True
                current_sub_tokens =[]
            else:
                current_sub_tokens.append(token )
                prev_is_special =False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state =self.__dict__.copy()
        state["sp_model"] =None
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ =d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs ={}
        self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file =os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model =self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
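# A minimal usage sketch for the tokenizer above (not part of the original
# file; the model path below is a placeholder for any BARThez SentencePiece
# file, and the sample sentence is purely illustrative):
# tokenizer = BarthezTokenizer("sentencepiece.bpe.model")
# ids = tokenizer("Transformers est formidable !")["input_ids"]
# text = tokenizer.decode(ids, skip_special_tokens=True)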
| 54
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil( images ):
    '''simple docstring'''
    images =(images / 2 + 0.5).clamp(0 , 1 )
    images =images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images =numpy_to_pil(images )
    return images
def numpy_to_pil( images ):
    '''simple docstring'''
    if images.ndim == 3:
        images =images[None, ...]
    images =(images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images =[Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images =[Image.fromarray(image ) for image in images]
    return pil_images
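# A quick self-check of numpy_to_pil() (added for illustration; pt_to_pil()
# additionally assumes a torch tensor scaled to [-1, 1]):
if __name__ == "__main__":
    import numpy as np  # only needed for this demo
    demo_batch = np.random.rand(2, 8, 8, 3)  # two 8x8 RGB images in [0, 1]
    pil_images = numpy_to_pil(demo_batch)
    assert len(pil_images) == 2 and pil_images[0].size == (8, 8)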
| 54
| 1
|
from math import pi, sqrt, tan
def surface_area_cube( side_length ):
    '''simple docstring'''
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values" )
    return 6 * side_length**2
def surface_area_cuboid( length , breadth , height ):
    '''simple docstring'''
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values" )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere( radius ):
    '''simple docstring'''
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values" )
    return 4 * pi * radius**2
def surface_area_hemisphere( radius ):
    '''simple docstring'''
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
    return 3 * pi * radius**2
def surface_area_cone( radius , height ):
    '''simple docstring'''
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values" )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum( radius_1 , radius_2 , height ):
    '''simple docstring'''
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values" )
    slant_height =(height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder( radius , height ):
    '''simple docstring'''
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values" )
    return 2 * pi * radius * (height + radius)
def surface_area_torus( torus_radius , tube_radius ):
    '''simple docstring'''
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values" )
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori" )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
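# Note on the formula above: 4 * pi**2 * torus_radius * tube_radius is the
# standard torus surface area (the centerline circumference 2*pi*R times the
# tube circumference 2*pi*r); the torus_radius >= tube_radius check rules out
# spindle and self-intersecting tori.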
def area_rectangle( length , width ):
    '''simple docstring'''
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values" )
    return length * width
def area_square( side_length ):
    '''simple docstring'''
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values" )
    return side_length**2
def area_triangle( base , height ):
    '''simple docstring'''
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values" )
    return (base * height) / 2
def area_triangle_three_sides( side_1 , side_2 , side_3 ):
    '''simple docstring'''
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle" )
    semi_perimeter =(side_1 + side_2 + side_3) / 2
    area =sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
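# Worked example for Heron's formula above (added for clarity): for the
# 5-12-13 right triangle, semi_perimeter = (5 + 12 + 13) / 2 = 15 and
# area = sqrt(15 * (15 - 5) * (15 - 12) * (15 - 13)) = sqrt(900) = 30.0.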
def area_parallelogram( base , height ):
    '''simple docstring'''
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values" )
    return base * height
def area_trapezium( base_1 , base_2 , height ):
    '''simple docstring'''
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values" )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle( radius ):
    '''simple docstring'''
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values" )
    return pi * radius**2
def area_ellipse( radius_x , radius_y ):
    '''simple docstring'''
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values" )
    return pi * radius_x * radius_y
def area_rhombus( diagonal_1 , diagonal_2 ):
    '''simple docstring'''
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values" )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon( sides , length ):
    '''simple docstring'''
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
            equal to three as number of sides" )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
            length of a side" )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print("""\nSurface Areas of various geometric shapes: \n""")
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 54
|
def hamming( n_element ):
    '''simple docstring'''
    n_element =int(n_element )
    if n_element < 1:
        my_error =ValueError("n_element should be a positive number" )
        raise my_error
    hamming_list =[1]
    i, j, k =(0, 0, 0)
    index =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
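# A small sanity check for hamming() (added for illustration; the expected
# values are the first twelve 5-smooth numbers, by definition of the series):
# >>> hamming(12)
# [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16]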
if __name__ == "__main__":
    n =input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54
| 1
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            '''simple docstring'''
            pass
def hashimage( image ):
    '''simple docstring'''
    m =hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask ):
    '''simple docstring'''
    npimg =np.array(mask )
    shape =npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests( unittest.TestCase ):
    model_mapping =dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping =dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        image_segmenter =MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        '''simple docstring'''
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf( self ):
        '''simple docstring'''
        pass
    @slow
    @require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        image_segmenter =pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        outputs =image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        new_output =[]
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold( self ):
        '''simple docstring'''
        model_id ="facebook/sam-vit-huge"
        image_segmenter =pipeline("mask-generation" , model=model_id )
        outputs =image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output =[]
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
| 54
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowercase : List[Any] =logging.get_logger(__name__)
class GLPNFeatureExtractor( GLPNImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 54
| 1
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision ):
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    getcontext().prec =precision
    num_iterations =ceil(precision / 14 )
    constant_term =426880 * Decimal(10005 ).sqrt()
    exponential_term =1
    linear_term =13591409
    partial_sum =Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term =factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
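# Quick sanity check (a sketch, not original code; each Chudnovsky term adds
# roughly 14 digits of precision, hence the ceil(precision / 14) iteration
# count above):
# >>> pi(10)
# '3.14159265'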
if __name__ == "__main__":
    n =50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 54
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class =CanineTokenizer
    test_rust_tokenizer =False
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        tokenizer =CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def canine_tokenizer( self ):
        '''simple docstring'''
        return CanineTokenizer.from_pretrained("google/canine-s" )
    def get_tokenizer( self , **kwargs ) -> CanineTokenizer:
        '''simple docstring'''
        tokenizer =self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
        tokenizer._unicode_vocab_size =1024
        return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
| 54
| 1
|
def hamming( n_element ):
    '''simple docstring'''
    n_element =int(n_element )
    if n_element < 1:
        my_error =ValueError("n_element should be a positive number" )
        raise my_error
    hamming_list =[1]
    i, j, k =(0, 0, 0)
    index =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    n =input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu( datasets.Metric ):
    def _info( self ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , predictions: List[List[str]] , references: List[List[List[str]]] , min_len: int = 1 , max_len: int = 4 , ) -> Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 54
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure ={
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] =["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] =[
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] =[
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
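# How the pattern above behaves (an illustrative note, not original code):
# replacing the module in sys.modules with a _LazyModule keeps importing this
# package cheap; the heavy torch/TF submodules are only imported when an
# attribute such as EfficientFormerModel is first accessed.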
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class =KandinskyV22Img2ImgPipeline
    params =['''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params =[
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    required_optional_params =[
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention =False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_0( self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs ={
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model =UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        '''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model =VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet =self.dummy_unet
        movq =self.dummy_movq
        scheduler_kwargs ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
        scheduler =DDIMScheduler(**scheduler_kwargs )
        components ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image =floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image =Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        if str(device ).startswith("mps" ):
            generator =torch.manual_seed(seed )
        else:
            generator =torch.Generator(device=device ).manual_seed(seed )
        inputs ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
    def test_kandinsky_img2img( self ):
        '''simple docstring'''
        device ="cpu"
        components =self.get_dummy_components()
        pipe =self.pipeline_class(**components )
        pipe =pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output =pipe(**self.get_dummy_inputs(device ) )
        image =output.images
        image_from_tuple =pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice =image[0, -3:, -3:, -1]
        image_from_tuple_slice =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice =np.array(
            [0.61997778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ) -> None:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        '''simple docstring'''
        expected_image =load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy" )
        init_image =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt ="A red cartoon frog, 4k"
        pipe_prior =KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline =KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
        pipeline =pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator =torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb =pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output =pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
        image =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 54
| 1
|
test_graph =[
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph , s , t , parent ):
    '''simple docstring'''
    visited =[False] * len(graph )
    queue =[s]
    visited[s] =True
    while queue:
        u =queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] =True
                parent[ind] =u
    return visited[t]
def mincut( graph , source , sink ):
    '''simple docstring'''
    parent =[-1] * (len(graph ))
    max_flow =0
    res =[]
    temp =[i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow =float("Inf" )
        s =sink
        while s != source:
            # Find the minimum value in select path
            path_flow =min(path_flow , graph[parent[s]][s] )
            s =parent[s]
        max_flow += path_flow
        v =sink
        while v != source:
            u =parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v =parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
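# For the sample graph above, Edmonds-Karp pushes a max flow of 23 from node 0
# to node 5, and the saturated forward edges printed here come out as
# [(1, 3), (4, 3), (4, 5)] -- a hand-checked expectation added for
# illustration, not output recorded in the original file.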
| 54
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> None:
        '''simple docstring'''
        self.parent =parent
        self.batch_size =batch_size
        self.seq_length =seq_length
        self.is_training =is_training
        self.use_attention_mask =use_attention_mask
        self.use_token_type_ids =use_token_type_ids
        self.use_labels =use_labels
        self.vocab_size =vocab_size
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.intermediate_size =intermediate_size
        self.hidden_act =hidden_act
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.max_position_embeddings =max_position_embeddings
        self.type_vocab_size =type_vocab_size
        self.type_sequence_label_size =type_sequence_label_size
        self.initializer_range =initializer_range
        self.num_choices =num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask =None
        if self.use_attention_mask:
            attention_mask =random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids =None
        if self.use_token_type_ids:
            token_type_ids =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config =RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask =config_and_inputs
        inputs_dict ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask =config_and_inputs
        config.is_decoder =True
        encoder_hidden_states =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking =True
    all_model_classes =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester =FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model =model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs =model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 54
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB =get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
    FR_CODE =5
    ES_CODE =10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class =Speech2TextTokenizer
    test_rust_tokenizer =False
    test_sentencepiece =True
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        spm_model =sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB )
        vocab =["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(spm_model ) )]
        vocab_tokens =dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir =Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        tokenizer =Speech2TextTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ ="<pad>"
UpperCAmelCase_ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ ={"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"
@classmethod
def lowerCAmelCase__ ( cls: str ) -> Optional[int]:
'''simple docstring'''
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def lowerCAmelCase__ ( self: Any ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size , 1_0000 )
def lowerCAmelCase__ ( self: str ) -> int:
'''simple docstring'''
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ ="fr"
UpperCAmelCase_ =self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _lowerCAmelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ ="fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
UpperCAmelCase_ ="es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid: return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """For coprime moduli n1 and n2, return the unique n in [0, n1*n2) with
    n % n1 == r1 and n % n2 == r2, via the extended Euclid algorithm."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b with (a * b) % n == 1, i.e. the modular inverse of a mod n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__lowercase : Optional[Any] =False
class A ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Tuple ) -> Any:
'''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
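# Hedged example of the shard naming scheme count_samples() assumes (inferred
# from the regex above, not stated elsewhere in the source): shard files named
# like "train-00001-52000.tfrecord", where the trailing number is that shard's
# sample count, so three such shards yield num_samples == 156000.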
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
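# Hedged invocation sketch (the script name, bucket paths, and tokenizer name
# below are placeholders, not taken from the source):
#   python run_mlm_tpu.py \
#       --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer unigram-tokenizer-wikitext --output_dir ./mlm-checkpoints --bfloat16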
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: DiagonalGaussianDistribution


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[int]=False ) -> Tuple:
'''simple docstring'''
if isinstance(_lowerCAmelCase , (Encoder, Decoder) ):
UpperCAmelCase_ =value
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: bool = True ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =use_tiling
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
self.enable_tiling(_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =True
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase__ ( self: Any ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
UpperCAmelCase_ ={}
def fn_recursive_add_processors(_lowerCAmelCase: str , _lowerCAmelCase: torch.nn.Module , _lowerCAmelCase: Dict[str, AttentionProcessor] ):
if hasattr(_lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , _lowerCAmelCase , _lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return processors
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =len(self.attn_processors.keys() )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(_lowerCAmelCase )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(_lowerCAmelCase: str , _lowerCAmelCase: torch.nn.Module , _lowerCAmelCase: Tuple ):
if hasattr(_lowerCAmelCase , "set_processor" ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
module.set_processor(_lowerCAmelCase )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , _lowerCAmelCase , _lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict ) -> int:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> AutoencoderKLOutput:
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_lowerCAmelCase , return_dict=_lowerCAmelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ =[self.encoder(_lowerCAmelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ =torch.cat(_lowerCAmelCase )
else:
UpperCAmelCase_ =self.encoder(_lowerCAmelCase )
UpperCAmelCase_ =self.quant_conv(_lowerCAmelCase )
UpperCAmelCase_ =DiagonalGaussianDistribution(_lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_lowerCAmelCase , return_dict=_lowerCAmelCase )
UpperCAmelCase_ =self.post_quant_conv(_lowerCAmelCase )
UpperCAmelCase_ =self.decoder(_lowerCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
@apply_forward_hook
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ =[self._decode(_lowerCAmelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ =torch.cat(_lowerCAmelCase )
else:
UpperCAmelCase_ =self._decode(_lowerCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_lowerCAmelCase )
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =min(a.shape[2] , b.shape[2] , _lowerCAmelCase )
for y in range(_lowerCAmelCase ):
UpperCAmelCase_ =a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =min(a.shape[3] , b.shape[3] , _lowerCAmelCase )
for x in range(_lowerCAmelCase ):
UpperCAmelCase_ =a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> AutoencoderKLOutput:
'''simple docstring'''
UpperCAmelCase_ =int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ =int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ =self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ =[]
for i in range(0 , x.shape[2] , _lowerCAmelCase ):
UpperCAmelCase_ =[]
for j in range(0 , x.shape[3] , _lowerCAmelCase ):
UpperCAmelCase_ =x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ =self.encoder(_lowerCAmelCase )
UpperCAmelCase_ =self.quant_conv(_lowerCAmelCase )
row.append(_lowerCAmelCase )
rows.append(_lowerCAmelCase )
UpperCAmelCase_ =[]
for i, row in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =[]
for j, tile in enumerate(_lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ =self.blend_v(rows[i - 1][j] , _lowerCAmelCase , _lowerCAmelCase )
if j > 0:
UpperCAmelCase_ =self.blend_h(row[j - 1] , _lowerCAmelCase , _lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_lowerCAmelCase , dim=3 ) )
UpperCAmelCase_ =torch.cat(_lowerCAmelCase , dim=2 )
UpperCAmelCase_ =DiagonalGaussianDistribution(_lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_lowerCAmelCase )
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase_ =int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ =int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ =self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ =[]
for i in range(0 , z.shape[2] , _lowerCAmelCase ):
UpperCAmelCase_ =[]
for j in range(0 , z.shape[3] , _lowerCAmelCase ):
UpperCAmelCase_ =z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ =self.post_quant_conv(_lowerCAmelCase )
UpperCAmelCase_ =self.decoder(_lowerCAmelCase )
row.append(_lowerCAmelCase )
rows.append(_lowerCAmelCase )
UpperCAmelCase_ =[]
for i, row in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =[]
for j, tile in enumerate(_lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ =self.blend_v(rows[i - 1][j] , _lowerCAmelCase , _lowerCAmelCase )
if j > 0:
UpperCAmelCase_ =self.blend_h(row[j - 1] , _lowerCAmelCase , _lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_lowerCAmelCase , dim=3 ) )
UpperCAmelCase_ =torch.cat(_lowerCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase_ =sample
UpperCAmelCase_ =self.encode(_lowerCAmelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ =posterior.sample(generator=_lowerCAmelCase )
else:
UpperCAmelCase_ =posterior.mode()
UpperCAmelCase_ =self.decode(_lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
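# A minimal numeric sketch of the seam blending used by the tiled codec above
# (toy tensors, not the diffusers API): across `blend_extent` columns the left
# tile fades out linearly while the right tile fades in, which hides the seam
# between independently decoded tiles.
if __name__ == "__main__":
    a = torch.ones(1, 1, 4, 8)
    b = torch.zeros(1, 1, 4, 8)
    blend_extent = 4
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    print(b[0, 0, 0, :blend_extent])  # tensor([1.0000, 0.7500, 0.5000, 0.2500])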
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
@slow
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
pass
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowercase : Optional[Any] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: int , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: take items in descending profit/weight
    order, splitting the last item if it does not fit entirely."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Profit gained per 1kg of each item, i.e. its profit/weight ratio.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # A sorted copy (ascending), so the best ratio sits at the end.
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) and i < length
    while limit <= max_weight and i < length:
        # greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this item as used
        # check if the whole item still fits within the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Whole item taken: profit gained is 1 * profit[index]
            # (weight[index] / weight[index] == 1).
            gain += 1 * profit[index]
        else:
            # Since this item is heavier than the remaining capacity, take only
            # the fraction that fits: (weight remaining) / weight[index].
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
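# Worked example for calc_profit (illustrative values, not from the source):
# with profits [1, 2, 3], weights [3, 4, 5] and max_weight 15, every item fits
# (total weight 12 <= 15), so calc_profit([1, 2, 3], [3, 4, 5], 15) == 6.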
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase : List[str] =logging.get_logger(__name__)
__lowercase : Dict ={
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class A ( __lowercase , __lowercase ):
    model_type = "dinat"
    attribute_map = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
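# Worked example of the hidden_size rule above, using the defaults: embed_dim=64
# with len(depths) == 4 stages gives hidden_size = int(64 * 2 ** 3) == 512.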
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any =["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) for an FLRW universe:
    H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + Ode),
    where the curvature density is Ok = 1 - (Om + Or + Ode)."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be non-negative")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
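# Sanity check for the demo above: the relative densities sum to 1.0001, so the
# curvature term is -1e-4 and at redshift 0 the bracket collapses to 1.0, giving
# hubble_parameter(...) == 68.3 (the Hubble constant itself, up to fp rounding).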
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
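# Hedged CLI sketch (fire exposes the function's positional arguments; the
# tokenizer name and data path below are placeholders, not from the source):
#   python save_len_file.py t5-small path/to/seq2seq_data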
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class A ( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class A ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: int ) -> str:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
UpperCAmelCase_ =dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase , (str, list, tuple) ):
UpperCAmelCase_ =data_files
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =[files]
UpperCAmelCase_ =[dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ =[]
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =[files]
UpperCAmelCase_ =[dl_manager.iter_files(_lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCAmelCase_ =self.config.features.arrow_schema.field(_lowerCAmelCase ).type
UpperCAmelCase_ =pa_table.append_column(_lowerCAmelCase , pa.array([None] * len(_lowerCAmelCase ) , type=_lowerCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ =table_cast(_lowerCAmelCase , self.config.features.arrow_schema )
return pa_table
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: int ) -> Tuple:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# We keep only the field we are interested in
UpperCAmelCase_ =dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_lowerCAmelCase , (list, tuple) ):
UpperCAmelCase_ =set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ ={col: [row.get(_lowerCAmelCase ) for row in dataset] for col in keys}
else:
UpperCAmelCase_ =dataset
UpperCAmelCase_ =pa.Table.from_pydict(_lowerCAmelCase )
yield file_idx, self._cast_table(_lowerCAmelCase )
# If the file has one json object per line
else:
with open(_lowerCAmelCase , "rb" ) as f:
UpperCAmelCase_ =0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCAmelCase_ =max(self.config.chunksize // 32 , 16 << 10 )
UpperCAmelCase_ =(
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
UpperCAmelCase_ =f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCAmelCase_ =batch.decode(self.config.encoding , errors=_lowerCAmelCase ).encode("utf-8" )
try:
while True:
try:
UpperCAmelCase_ =paj.read_json(
io.BytesIO(_lowerCAmelCase ) , read_options=paj.ReadOptions(block_size=_lowerCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCAmelCase )
or block_size > len(_lowerCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'Batch of {len(_lowerCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
except json.JSONDecodeError:
logger.error(F'Failed to read file \'{file}\' with error {type(_lowerCAmelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCAmelCase , _lowerCAmelCase ): # list is the only sequence type supported in JSON
try:
UpperCAmelCase_ =set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ ={col: [row.get(_lowerCAmelCase ) for row in dataset] for col in keys}
UpperCAmelCase_ =pa.Table.from_pydict(_lowerCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'Failed to read file \'{file}\' with error {type(_lowerCAmelCase )}: {e}' )
raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCAmelCase )
break
else:
logger.error(F'Failed to read file \'{file}\' with error {type(_lowerCAmelCase )}: {e}' )
raise ValueError(
F'Not able to read records in the JSON file at {file}. '
F'You should probably indicate the field of the JSON file containing your records. '
F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase )
batch_idx += 1
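# Hedged illustration of the two JSON layouts the loader above accepts when
# `field` is configured (the field name "data" is an example, not part of the
# datasets API):
#   {"data": [{"a": 1, "b": 2}, {"a": 3, "b": 4}]}   # list of dicts
#   {"data": {"a": [1, 3], "b": [2, 4]}}             # dict of lists
# Both normalize to the same two-row pyarrow table via pa.Table.from_pydict.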
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A :
def __init__( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: List[str]=30 , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: List[str]=3 , _lowerCAmelCase: Dict=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Tuple=32 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Dict=4 , _lowerCAmelCase: Dict=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: Optional[int]=None , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ =(image_size // patch_size) ** 2
UpperCAmelCase_ =num_patches + 1
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel(config=_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCAmelCase_ =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.type_sequence_label_size
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ =1
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( __lowercase , __lowercase , unittest.TestCase ):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ =TFViTModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A ( unittest.TestCase ):
def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: Optional[int]=7 , _lowerCAmelCase: Any=True , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: str=True , _lowerCAmelCase: Optional[int]=99 , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Any=5 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: List[str]="gelu" , _lowerCAmelCase: Dict=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: int=512 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=4 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =seq_length
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_attention_mask
UpperCAmelCase_ =use_token_type_ids
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =num_choices
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ =None
if self.use_attention_mask:
UpperCAmelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ =None
if self.use_token_type_ids:
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ =True
UpperCAmelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( __lowercase , unittest.TestCase ):
_snake_case =True
_snake_case =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =FlaxRobertaModelTester(self )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase_ =model_class_name.from_pretrained("roberta-base" , from_pt=_lowerCAmelCase )
UpperCAmelCase_ =model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    '''Recursively search a sorted list for an item, halving the range each call.'''
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
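# A minimal doctest-style sketch of the behaviour (illustrative values only):
# >>> binary_search([1, 3, 5, 7, 9], 7)
# True
# >>> binary_search([1, 3, 5, 7, 9], 4)
# False
# Each call discards half of the remaining list, so the recursion depth is O(log n).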
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(f"""{target} was {not_str}found in {sequence}""")
from collections import defaultdict
def dfs(start: int) -> int:
    '''Return the size of the subtree rooted at start, recording nodes whose subtree size is even.'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
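# How the answer is derived (a sketch of the logic above): dfs returns the size
# of the subtree rooted at `start`, and every node whose subtree has even size
# is recorded in `cuts`, because the edge above it can be removed while leaving
# both halves even. The root of an even-sized tree is always recorded too,
# hence the `len(cuts) - 1` printed below. For the sample edges, the removable
# edges are (1, 3) and (1, 6), giving an answer of 2.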
def even_tree():
    '''Run the depth-first traversal from the root (node 1).'''
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    '''Pick two random sorted hands and derive the expected comparison result.'''
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
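# The expected label is read off an index trick (a sketch of the line above):
# (play >= oppo) adds 1 when the player's hand ranks at least as high, and
# (play > oppo) adds another 1 when it ranks strictly higher, so the index is
# 0 -> "Loss", 1 -> "Tie", 2 -> "Win". For example, play = 5 and oppo = 5
# gives 1 + 0 = 1 -> "Tie".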
def generate_random_hands(number_of_hands: int = 100):
    '''Yield `number_of_hands` random (hand, other, expected) triples.'''
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Five-high straights must be compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Repeated calls should keep returning True without mutating the card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem 54 of Project Euler: count Player 1 wins over the file of 1000 hands.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
_snake_case =StableDiffusionInstructPixaPixPipeline
_snake_case =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
_snake_case =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
_snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self: Tuple ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
UpperCAmelCase_ =PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
torch.manual_seed(0 )
UpperCAmelCase_ =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ =CLIPTextModel(_lowerCAmelCase )
UpperCAmelCase_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Any , _lowerCAmelCase: Tuple=0 ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ =Image.fromarray(np.uint8(_lowerCAmelCase ) ).convert("RGB" )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self: Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ =np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ ="french fries"
UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ =np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ =[inputs["prompt"]] * 2
        UpperCAmelCase_ =np.array(inputs["image"] ).astype(np.float32 ) / 255.0
UpperCAmelCase_ =torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase )
UpperCAmelCase_ =image / 2 + 0.5
UpperCAmelCase_ =image.permute(0 , 3 , 1 , 2 )
UpperCAmelCase_ =image.repeat(2 , 1 , 1 , 1 )
UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCAmelCase_ =np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" )
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
        rounded_slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in rounded_slice] ) )
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ =np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self: str ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =VaeImageProcessor(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =pipe(**self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="pt" ) )[0]
UpperCAmelCase_ =components["vae"]
UpperCAmelCase_ =self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCAmelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCAmelCase_ =pipe(**_lowerCAmelCase )[0]
UpperCAmelCase_ =np.abs(out - out_latents_inputs ).max()
self.assertLess(_lowerCAmelCase , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Tuple=0 ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
UpperCAmelCase_ ={
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =self.get_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ =np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =self.get_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ =np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =self.get_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ =np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =0
def callback_fn(_lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: torch.FloatTensor ) -> None:
UpperCAmelCase_ =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ =latents[0, -3:, -3:, -1]
UpperCAmelCase_ =np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCAmelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ =latents[0, -3:, -3:, -1]
UpperCAmelCase_ =np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
UpperCAmelCase_ =False
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ =self.get_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase )
UpperCAmelCase_ =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase__ ( self: Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase_ =inputs["image"].resize((504, 504) )
UpperCAmelCase_ ="timbrooks/instruct-pix2pix"
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =pipe(**_lowerCAmelCase )
UpperCAmelCase_ =output.images[0]
UpperCAmelCase_ =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCAmelCase_ =np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase : int =logging.get_logger(__name__)
class A ( __lowercase ):
_snake_case =['''pixel_values''']
def __init__( self: List[Any] , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[int, float] = 1 / 255 , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , **_lowerCAmelCase: Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ =crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ =resample
UpperCAmelCase_ =do_rescale
UpperCAmelCase_ =rescale_factor
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: float , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Any , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
UpperCAmelCase_ =size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ =int(shortest_edge / crop_pct )
UpperCAmelCase_ =get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_lowerCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_lowerCAmelCase , size=(shortest_edge, shortest_edge) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
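        # Worked example of the branching above (assumed values): with
        # shortest_edge = 224 and the default crop_pct = 224 / 256, the image is
        # first resized so its short side is int(224 / 0.875) = 256 pixels and
        # then center-cropped to 224 x 224, matching the classic "resize to 256,
        # crop to 224" evaluation recipe. At 384 and above, the image is warped
        # directly to (shortest_edge, shortest_edge) with no crop.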
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: str , ) -> Optional[Any]:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase: Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , crop_pct=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : List[Any] =WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
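# Sketch of the parsing above on a hypothetical pytest summary (not real CI
# output): "2 failed, 98 passed in 4.23s ==" splits on spaces; the token before
# "failed" yields failed = 2, the token before "passed" yields success = 98,
# and because the line ends in "=" signs, time_spent is taken from the
# second-to-last token, "4.23s".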
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        self.thread_ts = None
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
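    # Illustrative parse (assumed inputs): "0:01:30" splits into hours = 0,
    # minutes = 1, seconds = 30.0, i.e. 90 total seconds, rendered as "0h1m30s";
    # a bare ".52" becomes [0, 0, ".52"], i.e. 0.52 seconds.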
@property
    def header(self) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out() -> None:
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_lowerCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowerCAmelCase , )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
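# Pagination sketch (hypothetical counts): the first request returns up to 100
# jobs; with total_count = 250, math.ceil((250 - 100) / 100) = 2 extra requests
# are issued, for pages 2 and 3, to collect the remaining jobs.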
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []
        def __str__(self):
            return self.name
        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})
    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
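# Sketch of the returned mapping (hypothetical directory name): running in a
# workspace containing a "doc_tests_gpu_test_reports/" directory yields
# {"doc_tests_gpu_test_reports": Artifact(...)}, with one {"name", "path"}
# entry recorded per matching directory.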
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Any ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCAmelCase_ =VideoClassificationPipeline(model=_lowerCAmelCase , image_processor=_lowerCAmelCase , top_k=2 )
UpperCAmelCase_ =[
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: int ) -> Any:
'''simple docstring'''
for example in examples:
UpperCAmelCase_ =video_classifier(_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"score": ANY(_lowerCAmelCase ), "label": ANY(_lowerCAmelCase )},
{"score": ANY(_lowerCAmelCase ), "label": ANY(_lowerCAmelCase )},
] , )
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ ="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
UpperCAmelCase_ =VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
UpperCAmelCase_ =pipeline(
"video-classification" , model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , frame_sampling_rate=4 )
UpperCAmelCase_ =hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCAmelCase_ =video_classifier(_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}] , )
UpperCAmelCase_ =video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}],
[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}],
] , )
@require_tf
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
pass
def solution(pence: int = 200) -> int:
    '''Count the ways `pence` pence can be made from standard UK coin denominations.'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
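# Worked example of the unbounded-knapsack recurrence above (illustrative): for
# pence = 5, processing coin 1 gives one way for every amount; adding coin 2
# lifts number_of_ways[5] to 3 (1+1+1+1+1, 1+1+1+2, 1+2+2); adding coin 5
# lifts it to 4 (the single 5p coin). Larger coins cannot contribute.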
if __name__ == "__main__":
assert solution(200) == 7_3682
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A :
def __init__( self: Any , _lowerCAmelCase: Tuple , _lowerCAmelCase: List[Any]=100 , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: str=30 , _lowerCAmelCase: str=2 , _lowerCAmelCase: str=3 , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: Union[str, Any]=True , _lowerCAmelCase: List[str]=32 , _lowerCAmelCase: str=4 , _lowerCAmelCase: List[str]=4 , _lowerCAmelCase: str=37 , _lowerCAmelCase: str="gelu" , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: Optional[Any]=0.1 , _lowerCAmelCase: str=10 , _lowerCAmelCase: Optional[int]=0.02 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: Dict=None , _lowerCAmelCase: Any=[0, 1, 2, 3] , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =100
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =scope
UpperCAmelCase_ =out_indices
UpperCAmelCase_ =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ =(image_size // patch_size) ** 2
UpperCAmelCase_ =num_patches + 1
def lowerCAmelCase__ ( self: Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[Any]:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Tuple , _lowerCAmelCase: int , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =BeitModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =BeitForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.type_sequence_label_size
UpperCAmelCase_ =BeitForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ =1
UpperCAmelCase_ =BeitForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: str , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.num_labels
UpperCAmelCase_ =BeitForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =BeitModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
continue
UpperCAmelCase_ =model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
UpperCAmelCase_ =self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
UpperCAmelCase_ =model(**_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ =False
UpperCAmelCase_ =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase_ =model_class(_lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCAmelCase )
model.train()
UpperCAmelCase_ =self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
UpperCAmelCase_ =model(**_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ =_config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ =BeitModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).pixel_values.to(_lowerCAmelCase )
# prepare bool_masked_pos
UpperCAmelCase_ =torch.ones((1, 196) , dtype=torch.bool ).to(_lowerCAmelCase )
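        # (hedged note) 196 == (224 // 16) ** 2: one mask flag per 16x16 patch of
        # the 224x224 input, matching the model's 196-token patch sequence.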
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(pixel_values=_lowerCAmelCase , bool_masked_pos=_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits
# verify the logits
UpperCAmelCase_ =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _lowerCAmelCase , atol=1e-2 ) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits
# verify the logits
UpperCAmelCase_ =torch.Size((1, 1000) )
self.assertEqual(logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
UpperCAmelCase_ =281
self.assertEqual(logits.argmax(-1 ).item() , _lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits
# verify the logits
UpperCAmelCase_ =torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
UpperCAmelCase_ =2396
self.assertEqual(logits.argmax(-1 ).item() , _lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
UpperCAmelCase_ =BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase_ =model.to(_lowerCAmelCase )
UpperCAmelCase_ =BeitImageProcessor(do_resize=_lowerCAmelCase , size=640 , do_center_crop=_lowerCAmelCase )
UpperCAmelCase_ =load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase_ =Image.open(ds[0]["file"] )
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits
# verify the logits
UpperCAmelCase_ =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase_ =torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=_lowerCAmelCase , )
else:
UpperCAmelCase_ =torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase__ ( self: int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase_ =model.to(_lowerCAmelCase )
UpperCAmelCase_ =BeitImageProcessor(do_resize=_lowerCAmelCase , size=640 , do_center_crop=_lowerCAmelCase )
UpperCAmelCase_ =load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase_ =Image.open(ds[0]["file"] )
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits.detach().cpu()
UpperCAmelCase_ =image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(500, 300)] )
UpperCAmelCase_ =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
UpperCAmelCase_ =image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase )
UpperCAmelCase_ =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
import sys


def matrix_chain_order(array):
    """Compute the minimum-cost parenthesization of a matrix chain.

    `array` holds the dimensions: matrix A_i is array[i - 1] x array[i].
    Returns the cost table and the split table used to reconstruct the solution.
    """
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization between chain indices i and j."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
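
# Hedged sanity check: for the classic CLRS Section 15.2 dimension list used in
# main(), the optimal chain product is expected to cost 15125 scalar
# multiplications, parenthesized as ((A1(A2A3))((A4A5)A6)).
if __name__ == "__main__":
    _matrix, _ = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    assert _matrix[1][6] == 15125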
import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Multiply two polynomials given as coefficient lists (lowest degree first)."""

    def __init__(self, poly_a=None, poly_b=None):
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
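
# Hedged usage sketch: multiplying (1 + 2x + 3x^2) by (1 + x) should give
# 1 + 3x + 5x^2 + 3x^3, i.e.
#     print(FFT(poly_a=[1, 2, 3], poly_b=[1, 1]))
# is expected to display the product coefficients [1, 3, 5, 3] (small rounding
# residue may appear in the complex parts).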
from math import log2


def lowest_set_bit_index(number):
    """Return the zero-based index of the lowest set bit (0 when number == 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if number == 0 else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
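
# Hedged worked example (`lowest_set_bit_index` above is a descriptive stand-in
# for the anonymized original name): 36 == 0b100100, so 36 & -36 isolates the
# lowest set bit, 0b100 == 4, and log2(4) == 2 gives its zero-based index.
if __name__ == "__main__":
    assert lowest_set_bit_index(36) == 2
    assert lowest_set_bit_index(1) == 0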
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_MASKED_LM_MAPPING
_snake_case =TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCAmelCase__ ( self: List[str] ) -> Tuple:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCAmelCase__ ( self: List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
UpperCAmelCase_ =unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 3_8015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 2_5506, "token_str": " accuser"},
] , )
UpperCAmelCase_ =unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 3_8015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 2_5506,
"token_str": " accuser",
},
] , )
UpperCAmelCase_ =unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def lowerCAmelCase__ ( self: Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
UpperCAmelCase_ =unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 3_5676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 1_6416, "token_str": "ELS"},
] , )
UpperCAmelCase_ =unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 3_5676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 1_6416, "token_str": "ELS"},
] , )
UpperCAmelCase_ =unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 1_3606, "token_str": " Clara"},
] , )
UpperCAmelCase_ =unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 3_5676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 3_5676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def lowerCAmelCase__ ( self: Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
UpperCAmelCase_ =pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
@slow
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(_lowerCAmelCase )
@slow
@require_tf
def lowerCAmelCase__ ( self: Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(_lowerCAmelCase )
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
UpperCAmelCase_ =unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 1_2790,
"token_str": " Lyon",
},
] , )
UpperCAmelCase_ =unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
UpperCAmelCase_ =None
UpperCAmelCase_ =None
self.run_pipeline_test(_lowerCAmelCase , [] )
@require_tf
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
UpperCAmelCase_ =None
UpperCAmelCase_ =None
self.run_pipeline_test(_lowerCAmelCase , [] )
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Union[str, Any] ) -> Tuple:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
UpperCAmelCase_ =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
UpperCAmelCase_ =[
F'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Union[str, Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =fill_masker.tokenizer
UpperCAmelCase_ =fill_masker.model
UpperCAmelCase_ =fill_masker(
F'This is a {tokenizer.mask_token}' , )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
UpperCAmelCase_ =fill_masker([F'This is a {tokenizer.mask_token}'] )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
UpperCAmelCase_ =fill_masker([F'This is a {tokenizer.mask_token}', F'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
_lowerCAmelCase , [
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
] , )
with self.assertRaises(_lowerCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_lowerCAmelCase ):
fill_masker("This is" )
self.run_test_top_k(_lowerCAmelCase , _lowerCAmelCase )
self.run_test_targets(_lowerCAmelCase , _lowerCAmelCase )
self.run_test_top_k_targets(_lowerCAmelCase , _lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_lowerCAmelCase , _lowerCAmelCase )
self.fill_mask_with_multiple_masks(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =tokenizer.get_vocab()
UpperCAmelCase_ =sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCAmelCase_ =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , targets=_lowerCAmelCase )
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
UpperCAmelCase_ ={vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _lowerCAmelCase )
UpperCAmelCase_ =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_lowerCAmelCase ) )
# Call argument
UpperCAmelCase_ =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , targets=_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
UpperCAmelCase_ ={vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _lowerCAmelCase )
UpperCAmelCase_ =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_lowerCAmelCase ) )
# Score equivalence
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , targets=_lowerCAmelCase )
UpperCAmelCase_ =[top_mask["token_str"] for top_mask in outputs]
UpperCAmelCase_ =[top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowerCAmelCase ) == set(_lowerCAmelCase ):
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , targets=_lowerCAmelCase )
UpperCAmelCase_ =[top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(_lowerCAmelCase ):
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowerCAmelCase ):
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , targets=[""] )
with self.assertRaises(_lowerCAmelCase ):
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , targets="" )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , top_k=2 )
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
UpperCAmelCase_ =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =tokenizer.get_vocab()
UpperCAmelCase_ =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# top_k=2, ntargets=3
UpperCAmelCase_ =sorted(vocab.keys() )[:3]
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2 , targets=_lowerCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
UpperCAmelCase_ =[el["token_str"] for el in sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x["score"] , reverse=_lowerCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowerCAmelCase ).issubset(_lowerCAmelCase ):
UpperCAmelCase_ =fill_masker(F'This is a {tokenizer.mask_token}' , top_k=3 , targets=_lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCAmelCase_ =sorted(vocab.keys() )[:3]
UpperCAmelCase_ =[targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCAmelCase_ =fill_masker(F'My name is {tokenizer.mask_token}' , targets=_lowerCAmelCase , top_k=10 )
        # The target list contains only three unique targets, so even with
        # top_k=10 the pipeline cannot return more predictions than that.
self.assertEqual(len(_lowerCAmelCase ) , 3 )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
UpperCAmelCase_ =fill_masker(
F'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
] , )
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a fairseq/metaseq state dict and remap its keys to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split the fused QKV weight into separate Q, K, V tensors
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated as K, V, Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Load the checkpoint into an HF OPTModel and save it to disk."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
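
# Hedged usage sketch (file name, paths and config id below are illustrative,
# not taken from the source):
#   python convert_opt_original_checkpoint.py \
#       --fairseq_path ./restored.pt \
#       --pytorch_dump_folder_path ./opt-hf \
#       --hf_config facebook/opt-350m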
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( __lowercase , unittest.TestCase ):
_snake_case =CLIPTokenizer
_snake_case =CLIPTokenizerFast
_snake_case =True
_snake_case ={}
_snake_case =False
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
super().setUp()
# fmt: off
UpperCAmelCase_ =["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ =dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
UpperCAmelCase_ =["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCAmelCase_ ={"unk_token": "<unk>"}
UpperCAmelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowerCAmelCase ) )
def lowerCAmelCase__ ( self: str , **_lowerCAmelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] , **_lowerCAmelCase: Optional[int] ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Dict ) -> int:
'''simple docstring'''
UpperCAmelCase_ ="lower newer"
UpperCAmelCase_ ="lower newer"
return input_text, output_text
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ ="lower newer"
UpperCAmelCase_ =["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =tokens + [tokenizer.unk_token]
UpperCAmelCase_ =[10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@require_ftfy
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase_ =self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase_ ="A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCAmelCase_ =tokenizer_s.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer_r.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCAmelCase_ ="xa\u0303y" + " " + "x\xe3y"
UpperCAmelCase_ =tokenizer_s.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer_r.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Test that the tokenization is identical on unicode of space type
UpperCAmelCase_ =[
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCAmelCase_ =tokenizer_s.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer_r.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Test that the tokenization is identical on unicode of line break type
UpperCAmelCase_ =[
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCAmelCase_ =tokenizer_s.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer_r.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCAmelCase_ ="hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ =F'{text_of_1_token} {text_of_1_token}'
UpperCAmelCase_ =self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , )
UpperCAmelCase_ =tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
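                # Worked example (hedged): for "hello hello" the first token spans
                # offsets (0, 5) and, after the separating space, the second spans (6, 11).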
UpperCAmelCase_ =F' {text}'
UpperCAmelCase_ =self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , )
UpperCAmelCase_ =tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCAmelCase ) + 1, 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Dict:
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def lowerCAmelCase__ ( self: Optional[int] ) -> Dict:
'''simple docstring'''
pass
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch with values in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
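
# Hedged usage sketch: a batch of two 8x8 RGB arrays with values in [0, 1]
# becomes a list of two PIL images.
if __name__ == "__main__":
    import numpy as np

    pils = numpy_to_pil(np.random.rand(2, 8, 8, 3))
    assert len(pils) == 2 and pils[0].size == (8, 8)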
def solution(length: int = 50) -> int:
    """Count the tilings of a row of `length` unit squares using coloured tiles
    of length two, three or four over grey unit squares (a Project Euler
    117-style recurrence)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("n_element should be a positive number")
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A ( __lowercase , unittest.TestCase ):
_snake_case =DDIMPipeline
_snake_case =UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_snake_case =PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''latents''',
'''callback''',
'''callback_steps''',
}
_snake_case =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_snake_case =False
def lowerCAmelCase__ ( self: List[Any] ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
UpperCAmelCase_ =DDIMScheduler()
UpperCAmelCase_ ={"unet": unet, "scheduler": scheduler}
return components
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: str , _lowerCAmelCase: Union[str, Any]=0 ) -> Tuple:
'''simple docstring'''
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self: str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ ="cpu"
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
UpperCAmelCase_ =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
UpperCAmelCase_ =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1e-3 )
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowerCAmelCase__ ( self: int ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def lowerCAmelCase__ ( self: List[Any] ) -> str:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Any ) -> str:
'''simple docstring'''
UpperCAmelCase_ ="google/ddpm-cifar10-32"
UpperCAmelCase_ =UNetaDModel.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =DDIMScheduler()
UpperCAmelCase_ =DDIMPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
ddim.to(_lowerCAmelCase )
ddim.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.manual_seed(0 )
UpperCAmelCase_ =ddim(generator=_lowerCAmelCase , eta=0.0 , output_type="numpy" ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ =np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self: Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ ="google/ddpm-ema-bedroom-256"
UpperCAmelCase_ =UNetaDModel.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =DDIMScheduler.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =DDIMPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
ddpm.to(_lowerCAmelCase )
ddpm.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.manual_seed(0 )
UpperCAmelCase_ =ddpm(generator=_lowerCAmelCase , output_type="numpy" ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ =np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings that differ in exactly one position.

    Returns the merged string with "_" at the differing position, or False
    when the strings differ in more than one position.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until no further merge is possible.

    The strings that survive a full round without being merged are the
    prime implicants.
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether an implicant covers a minterm: the strings must differ in
    exactly `count` positions (the number of "_" wildcards in the implicant)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential implicants and clear every column they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick the implicant that covers the most remaining columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
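# Usage sketch (illustrative values, not part of the original module):
# minimizing f(a, b, c) with minterms {0, 1, 2, 5}:
#
#     binary = decimal_to_binary(3, [0, 1, 2, 5])   # ['000', '001', '010', '101']
#     prime_implicants = check(binary)              # {'00_', '0_0', '_01'} in some order
#     chart = prime_implicant_chart(prime_implicants, binary)
#     selection(chart, prime_implicants)            # essential cover, e.g. ['0_0', '_01']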
| 54
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
| 54
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
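# Usage sketch (assumes the "allegro/herbert-base-cased" Hub checkpoint): the
# _LazyModule above defers the actual import until first attribute access, so
#
#     from transformers.models.herbert import HerbertTokenizerFast
#     tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#
# only loads tokenization_herbert_fast at that point.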
| 54
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
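# Equivalence sketch (illustrative): `_compute` above is a thin wrapper over
# nltk, so the same number can be obtained directly.
def _gleu_example(hypotheses, list_of_references):
    return gleu_score.corpus_gleu(
        list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4
    )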
| 54
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
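# Example (illustrative): a 2x3 nested list of floats drawn uniformly from [0.0, 2.0).
def _floats_list_example():
    return floats_list((2, 3), scale=2.0)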
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 54
| 1
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
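# Renaming sketch (hypothetical key, for illustration): after the
# KEYS_TO_MODIFY_MAPPING substitutions, the regex branch above maps
#     "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"
# to
#     "mask_decoder.output_hypernetworks_mlps.0.proj_out.weight"
# because the second capture group (the inner layer number) is 2.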
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__="ybelkada/segment-anything" ):
'''simple docstring'''
UpperCAmelCase_ =hf_hub_download(lowercase__ , F'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
UpperCAmelCase_ =SamConfig()
elif "sam_vit_l" in model_name:
UpperCAmelCase_ =SamVisionConfig(
hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
UpperCAmelCase_ =SamConfig(
vision_config=lowercase__ , )
elif "sam_vit_h" in model_name:
UpperCAmelCase_ =SamVisionConfig(
hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
UpperCAmelCase_ =SamConfig(
vision_config=lowercase__ , )
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
UpperCAmelCase_ =replace_keys(lowercase__ )
UpperCAmelCase_ =SamImageProcessor()
UpperCAmelCase_ =SamProcessor(image_processor=lowercase__ )
UpperCAmelCase_ =SamModel(lowercase__ )
hf_model.load_state_dict(lowercase__ )
UpperCAmelCase_ =hf_model.to("cuda" )
UpperCAmelCase_ ="https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
UpperCAmelCase_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("RGB" )
UpperCAmelCase_ =[[[4_0_0, 6_5_0]]]
UpperCAmelCase_ =[[1]]
UpperCAmelCase_ =processor(images=np.array(lowercase__ ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCAmelCase_ =hf_model(**lowercase__ )
UpperCAmelCase_ =output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
UpperCAmelCase_ =processor(
images=np.array(lowercase__ ) , input_points=lowercase__ , input_labels=lowercase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCAmelCase_ =hf_model(**lowercase__ )
UpperCAmelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
UpperCAmelCase_ =((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
UpperCAmelCase_ =processor(images=np.array(lowercase__ ) , input_boxes=lowercase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCAmelCase_ =hf_model(**lowercase__ )
UpperCAmelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
UpperCAmelCase_ =[[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
UpperCAmelCase_ =[[1, 1]]
UpperCAmelCase_ =processor(
images=np.array(lowercase__ ) , input_points=lowercase__ , input_labels=lowercase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCAmelCase_ =hf_model(**lowercase__ )
UpperCAmelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 54
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: Optional[int]=7 , _lowerCAmelCase: Any=True , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: str=True , _lowerCAmelCase: Optional[int]=99 , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Any=5 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: List[str]="gelu" , _lowerCAmelCase: Dict=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: int=512 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=4 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =seq_length
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_attention_mask
UpperCAmelCase_ =use_token_type_ids
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =num_choices
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ =None
if self.use_attention_mask:
UpperCAmelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ =None
if self.use_token_type_ids:
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ =True
UpperCAmelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 54
| 1
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
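# Usage sketch (interactive; assumes a real terminal, so it is not executed here):
#
#     menu = BulletMenu("Which device?", ["cpu", "cuda", "mps"])
#     choice = menu.run(default_choice=0)   # returns the index of the selection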
| 54
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the smallest non-negative n with n = r1 (mod n1) and
    n = r2 (mod n2), assuming gcd(n1, n2) == 1."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that (a * b) % n == 1."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
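# Worked example (illustrative): solve n = 1 (mod 5) and n = 3 (mod 7).
# extended_euclid(5, 7) returns (3, -2) because 5*3 + 7*(-2) = 1, so
# chinese_remainder_theorem(5, 1, 7, 3) computes
# (3*3*5 + 1*(-2)*7) % 35 = (45 - 14) % 35 = 31, and indeed
# 31 % 5 == 1 and 31 % 7 == 3.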
| 54
| 1
|
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """Configuration class storing the configuration of a NEZHA model."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
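

# Illustrative usage (a sketch; this only builds a config object in memory, nothing
# is downloaded):
#   config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
#   assert config.model_type == "nezha"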
| 54
|
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE


def parse_args():
    """Parse the command-line arguments for the TPU masked-language-modeling run."""
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """Connect to the TPU cluster and initialize the TPU system, returning the resolver."""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    """Sum the per-shard sample counts that are encoded in the TFRecord filenames."""
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build a batched, masked tf.data pipeline from a list of TFRecord files."""
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    """Run masked-language-model training with the parsed arguments."""
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)
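

# Example invocation (an illustrative sketch; the script name and gs:// paths are
# placeholders, not taken from this file):
#   python run_mlm.py --train_dataset gs://bucket/train --eval_dataset gs://bucket/eval \
#       --output_dir gs://bucket/model --tokenizer unigram-tokenizer-wikitext --bfloat16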
if __name__ == "__main__":
    args = parse_args()
main(args)
| 54
| 1
|
import sys
def matrix_chain_order(array):
    """Dynamic-programming matrix-chain ordering (CLRS 15.2): returns the cost and split tables."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
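

# Sanity note (well-known textbook result, stated here for reference rather than
# computed from this file): for the CLRS dimensions [30, 35, 15, 5, 10, 20, 25]
# used in main() below, the minimum number of scalar multiplications is 15125.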
def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization recorded in the split table."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 54
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so the module can be imported without the vision dependencies."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ], )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 54
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" ConvBERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create the token type IDs (segment IDs) for a sequence pair in the BERT format."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
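

# Minimal usage sketch (illustrative; downloads the YituTech checkpoint on first use):
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tokenizer("Hello world")["input_ids"]  # ids framed by [CLS] ... [SEP]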
| 54
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: pick items by descending profit/weight ratio."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight does not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so duplicate ratios resolve to the next index
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
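

# Worked example (classic fractional-knapsack instance, values checked by hand):
# calc_profit([60, 100, 120], [10, 20, 30], 50) takes items 0 and 1 whole plus
# two thirds of item 2, returning 240.0.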
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 54
| 1
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
UpperCAmelCase_ =tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ ={"input_ids": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
| 54
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
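
# Note on the pattern above (illustrative, no behavior added here): with the lazy
# module installed in sys.modules, `from ...models.blip import BlipProcessor` only
# triggers the heavy submodule import on first attribute access.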
| 54
| 1
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 54
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the token counts per example for the train and val splits to each dataset's len_file."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn), desc=str(ds.len_file), )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
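

# Example CLI invocation via fire (an illustrative sketch; the script name and data
# paths are placeholders):
#   python save_len_file.py t5-small /path/to/dataset_dir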
if __name__ == "__main__":
fire.Fire(save_len_file)
| 54
| 1
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__version__ = "3.0.12"


_logger = None


def logger():
    """Return the logger instance used by this module, creating it lazily."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by BaseFileLock.acquire()."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the lock, blocking up to *timeout* seconds and polling every *poll_intervall* seconds."""
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        """Release the lock; with force=True the lock counter is ignored and the lock released unconditionally."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock() function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file; works on any platform."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
#: Alias for the lock implementation that matches the current platform.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
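

# Minimal usage sketch (illustrative; "app.lock" is a placeholder path):
#   with FileLock("app.lock", timeout=5):
#       ...  # exclusive section; raises Timeout if the lock is not acquired in 5 s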
| 54
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO test fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 54
| 1
|
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 135: count n below the limit for which x^2 - y^2 - z^2 = n
    (with x, y, z in arithmetic progression) has exactly ten solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
|
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if *item* occurs in the sorted list, using recursive bisection."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
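

# Quick self-checks (illustrative additions, not part of the original script):
assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False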
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
__lowercase : Optional[Any] =[int(item.strip()) for item in user_input.split(""",""")]
__lowercase : List[Any] =int(input("""Enter the number to be found in the list:\n""").strip())
__lowercase : Optional[Any] ="""""" if binary_search(sequence, target) else """not """
print(f"""{target} was {not_str}found in {sequence}""")
| 54
| 1