code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import re
def __UpperCamelCase ( _lowerCAmelCase ):
"""simple docstring"""
if len(re.findall("[ATCG]" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __magic_name__ ( _a):
_UpperCAmelCase : Optional[Any] = 'informer'
_UpperCAmelCase : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : List[str] ,__SCREAMING_SNAKE_CASE : Optional[int] = None ,__SCREAMING_SNAKE_CASE : Optional[int] = None ,__SCREAMING_SNAKE_CASE : str = "student_t" ,__SCREAMING_SNAKE_CASE : str = "nll" ,__SCREAMING_SNAKE_CASE : int = 1 ,__SCREAMING_SNAKE_CASE : List[int] = None ,__SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" ,__SCREAMING_SNAKE_CASE : int = 0 ,__SCREAMING_SNAKE_CASE : int = 0 ,__SCREAMING_SNAKE_CASE : int = 0 ,__SCREAMING_SNAKE_CASE : int = 0 ,__SCREAMING_SNAKE_CASE : Optional[List[int]] = None ,__SCREAMING_SNAKE_CASE : Optional[List[int]] = None ,__SCREAMING_SNAKE_CASE : int = 6_4 ,__SCREAMING_SNAKE_CASE : int = 3_2 ,__SCREAMING_SNAKE_CASE : int = 3_2 ,__SCREAMING_SNAKE_CASE : int = 2 ,__SCREAMING_SNAKE_CASE : int = 2 ,__SCREAMING_SNAKE_CASE : int = 2 ,__SCREAMING_SNAKE_CASE : int = 2 ,__SCREAMING_SNAKE_CASE : bool = True ,__SCREAMING_SNAKE_CASE : str = "gelu" ,__SCREAMING_SNAKE_CASE : float = 0.05 ,__SCREAMING_SNAKE_CASE : float = 0.1 ,__SCREAMING_SNAKE_CASE : float = 0.1 ,__SCREAMING_SNAKE_CASE : float = 0.1 ,__SCREAMING_SNAKE_CASE : float = 0.1 ,__SCREAMING_SNAKE_CASE : int = 1_0_0 ,__SCREAMING_SNAKE_CASE : float = 0.02 ,__SCREAMING_SNAKE_CASE : Optional[Any]=True ,__SCREAMING_SNAKE_CASE : str = "prob" ,__SCREAMING_SNAKE_CASE : int = 5 ,__SCREAMING_SNAKE_CASE : bool = True ,**__SCREAMING_SNAKE_CASE : List[str] ,):
# time series specific configuration
UpperCAmelCase = prediction_length
UpperCAmelCase = context_length or prediction_length
UpperCAmelCase = distribution_output
UpperCAmelCase = loss
UpperCAmelCase = input_size
UpperCAmelCase = num_time_features
UpperCAmelCase = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase = scaling
UpperCAmelCase = num_dynamic_real_features
UpperCAmelCase = num_static_real_features
UpperCAmelCase = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
UpperCAmelCase = cardinality
else:
UpperCAmelCase = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
UpperCAmelCase = embedding_dimension
else:
UpperCAmelCase = [min(5_0 ,(cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase = d_model
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = encoder_layers
UpperCAmelCase = decoder_layers
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = use_cache
# Informer
UpperCAmelCase = attention_type
UpperCAmelCase = sampling_factor
UpperCAmelCase = distil
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
@property
def _UpperCAmelCase ( self : List[str] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 333
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( __a ):
"""simple docstring"""
__magic_name__ = (PNDMScheduler,)
__magic_name__ = (("num_inference_steps", 5_0),)
def a ( self , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[str] = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**A__ )
return config
def a ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = dict(self.forward_default_kwargs )
_lowerCAmelCase : Dict = kwargs.pop('num_inference_steps' , A__ )
_lowerCAmelCase : str = self.dummy_sample
_lowerCAmelCase : List[str] = 0.1 * sample
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**A__ )
_lowerCAmelCase : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(A__ )
# copy over dummy past residuals
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__ )
_lowerCAmelCase : List[Any] = scheduler_class.from_pretrained(A__ )
new_scheduler.set_timesteps(A__ )
# copy over dummy past residuals
_lowerCAmelCase : List[Any] = dummy_past_residuals[:]
_lowerCAmelCase : Tuple = scheduler.step_prk(A__ , A__ , A__ , **A__ ).prev_sample
_lowerCAmelCase : int = new_scheduler.step_prk(A__ , A__ , A__ , **A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowerCAmelCase : List[Any] = scheduler.step_plms(A__ , A__ , A__ , **A__ ).prev_sample
_lowerCAmelCase : Optional[Any] = new_scheduler.step_plms(A__ , A__ , A__ , **A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a ( self ):
'''simple docstring'''
pass
def a ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[Any] = kwargs.pop('num_inference_steps' , A__ )
_lowerCAmelCase : List[str] = self.dummy_sample
_lowerCAmelCase : str = 0.1 * sample
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Optional[int] = scheduler_class(**A__ )
scheduler.set_timesteps(A__ )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__ )
_lowerCAmelCase : Optional[Any] = scheduler_class.from_pretrained(A__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A__ )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[:]
_lowerCAmelCase : Union[str, Any] = scheduler.step_prk(A__ , A__ , A__ , **A__ ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step_prk(A__ , A__ , A__ , **A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowerCAmelCase : List[Any] = scheduler.step_plms(A__ , A__ , A__ , **A__ ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step_plms(A__ , A__ , A__ , **A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a ( self , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config(**A__ )
_lowerCAmelCase : Optional[Any] = scheduler_class(**A__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(A__ )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowerCAmelCase : str = model(A__ , A__ )
_lowerCAmelCase : Union[str, Any] = scheduler.step_prk(A__ , A__ , A__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowerCAmelCase : str = model(A__ , A__ )
_lowerCAmelCase : int = scheduler.step_plms(A__ , A__ , A__ ).prev_sample
return sample
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = dict(self.forward_default_kwargs )
_lowerCAmelCase : Tuple = kwargs.pop('num_inference_steps' , A__ )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCAmelCase : Optional[Any] = scheduler_class(**A__ )
_lowerCAmelCase : List[Any] = self.dummy_sample
_lowerCAmelCase : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(A__ , 'set_timesteps' ):
scheduler.set_timesteps(A__ )
elif num_inference_steps is not None and not hasattr(A__ , 'set_timesteps' ):
_lowerCAmelCase : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCAmelCase : str = dummy_past_residuals[:]
_lowerCAmelCase : Dict = scheduler.step_prk(A__ , 0 , A__ , **A__ ).prev_sample
_lowerCAmelCase : Optional[Any] = scheduler.step_prk(A__ , 1 , A__ , **A__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCAmelCase : Any = scheduler.step_plms(A__ , 0 , A__ , **A__ ).prev_sample
_lowerCAmelCase : str = scheduler.step_plms(A__ , 1 , A__ , **A__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=A__ )
def a ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A__ )
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Any = self.get_scheduler_config(steps_offset=1 )
_lowerCAmelCase : Optional[int] = scheduler_class(**A__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def a ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def a ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A__ )
def a ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def a ( self ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=A__ )
def a ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=A__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 27
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.dummy_sample
_lowerCAmelCase : Optional[int] = 0.1 * sample
_lowerCAmelCase : int = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**A__ )
scheduler.set_timesteps(A__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowerCAmelCase : Dict = scheduler.step_prk(A__ , A__ , A__ ).prev_sample
def a ( self ):
'''simple docstring'''
with self.assertRaises(A__ ):
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**A__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.full_loop()
_lowerCAmelCase : Optional[int] = torch.sum(torch.abs(A__ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.sum(torch.abs(A__ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.full_loop(set_alpha_to_one=A__ , beta_start=0.01 )
_lowerCAmelCase : Optional[int] = torch.sum(torch.abs(A__ ) )
_lowerCAmelCase : List[Any] = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.full_loop(set_alpha_to_one=A__ , beta_start=0.01 )
_lowerCAmelCase : List[str] = torch.sum(torch.abs(A__ ) )
_lowerCAmelCase : int = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 700
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def lowercase (_A , _A ):
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def lowercase (_A ):
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : str = tmp_path_factory.getbasetemp() / 'cache'
_lowerCAmelCase : Dict = test_hf_cache_home / 'datasets'
_lowerCAmelCase : List[Any] = test_hf_cache_home / 'metrics'
_lowerCAmelCase : List[Any] = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_A ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_A ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_A ) )
_lowerCAmelCase : Dict = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_A ) )
_lowerCAmelCase : Union[str, Any] = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_A ) )
@pytest.fixture(autouse=_A , scope='session' )
def lowercase ():
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_A )
def lowercase (_A ):
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _A )
@pytest.fixture
def lowercase (_A ):
"""simple docstring"""
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _A )
| 630
| 0
|
import os
import time
import numpy as np
import onnxruntime as ort
lowerCamelCase__ : int = '1'
lowerCamelCase__ : Optional[int] = '0'
lowerCamelCase__ : Optional[Any] = '1'
lowerCamelCase__ : int = ort.SessionOptions()
lowerCamelCase__ : List[Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
lowerCamelCase__ : List[str] = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
lowerCamelCase__ : List[str] = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
lowerCamelCase__ : Union[str, Any] = ort.RunOptions()
lowerCamelCase__ : int = 128
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Tuple = np.ones((batch, sequence), dtype=np.intaa)
lowerCamelCase__ : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
lowerCamelCase__ : Any = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
lowerCamelCase__ : str = time.time()
lowerCamelCase__ : int = 2_000
lowerCamelCase__ : Any = {}
for iter in range(max_iters):
lowerCamelCase__ : str = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters))
| 31
|
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665
| 0
|
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case_ : Union[str, Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def UpperCamelCase_ ( ):
"""simple docstring"""
print(sum_of_series(1, 1, 1_0 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
a_ = logging.get_logger(__name__)
a_ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( snake_case__ ):
UpperCAmelCase_ = """longformer"""
def __init__( self , lowercase_ = 5_12 , lowercase_ = 2 , lowercase_ = 1 , lowercase_ = 0 , lowercase_ = 2 , lowercase_ = 3_05_22 , lowercase_ = 7_68 , lowercase_ = 12 , lowercase_ = 12 , lowercase_ = 30_72 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 5_12 , lowercase_ = 2 , lowercase_ = 0.02 , lowercase_ = 1E-12 , lowercase_ = False , **lowercase_ , ):
super().__init__(pad_token_id=lowercase_ , **lowercase_)
snake_case_ : Dict = attention_window
snake_case_ : Tuple = sep_token_id
snake_case_ : Optional[Any] = bos_token_id
snake_case_ : str = eos_token_id
snake_case_ : Optional[int] = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : str = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : List[str] = layer_norm_eps
snake_case_ : Tuple = onnx_export
class UpperCAmelCase_ ( snake_case__ ):
def __init__( self , lowercase_ , lowercase_ = "default" , lowercase_ = None):
super().__init__(lowercase_ , lowercase_ , lowercase_)
snake_case_ : Dict = True
@property
def snake_case__ ( self):
if self.task == "multiple-choice":
snake_case_ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case_ : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
])
@property
def snake_case__ ( self):
snake_case_ : Union[str, Any] = super().outputs
if self.task == "default":
snake_case_ : str = {0: "batch"}
return outputs
@property
def snake_case__ ( self):
return 1E-4
@property
def snake_case__ ( self):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def snake_case__ ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ):
snake_case_ : Optional[Any] = super().generate_dummy_inputs(
preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
snake_case_ : Any = torch.zeros_like(inputs["input_ids"])
# make every second token global
snake_case_ : Tuple = 1
return inputs
| 92
| 0
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 109
|
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
# Return True if there is node that has not iterated.
lowerCAmelCase__ : Optional[int] = [False] * len(UpperCamelCase )
lowerCAmelCase__ : Tuple = [s]
lowerCAmelCase__ : Dict = True
while queue:
lowerCAmelCase__ : int = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Optional[int] = u
return visited[t]
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase__ : Any = [-1] * (len(UpperCamelCase ))
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Optional[int] = [i[:] for i in graph] # Record original cut, copy.
while bfs(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : List[Any] = float('''Inf''' )
lowerCAmelCase__ : Dict = sink
while s != source:
# Find the minimum value in select path
lowerCAmelCase__ : Tuple = min(UpperCamelCase , graph[parent[s]][s] )
lowerCAmelCase__ : List[Any] = parent[s]
max_flow += path_flow
lowerCAmelCase__ : List[Any] = sink
while v != source:
lowerCAmelCase__ : Dict = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCAmelCase__ : Optional[Any] = parent[v]
for i in range(len(UpperCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 678
| 0
|
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : int = 4000000 ):
'''simple docstring'''
_lowerCAmelCase = [0, 1]
_lowerCAmelCase = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
_lowerCAmelCase = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 716
|
'''simple docstring'''
import math
def __a(SCREAMING_SNAKE_CASE_ : int = 100 ):
'''simple docstring'''
_lowerCAmelCase = sum(i * i for i in range(1 , n + 1 ) )
_lowerCAmelCase = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 489
| 0
|
'''simple docstring'''
def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> int:
"""simple docstring"""
return abs(UpperCAmelCase__ ) if a == 0 else greatest_common_divisor(b % a, UpperCAmelCase__ )
def __lowerCamelCase ( __snake_case : int, __snake_case : int ) -> int:
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
A__ , A__ : List[Any] =y, x % y
return abs(UpperCAmelCase__ )
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
try:
A__ : List[Any] =input("""Enter two integers separated by comma (,): """ ).split(""",""" )
A__ : List[str] =int(nums[0] )
A__ : Dict =int(nums[1] )
print(
f"greatest_common_divisor({num_a}, {num_a}) = "
f"{greatest_common_divisor(UpperCAmelCase__, UpperCAmelCase__ )}" )
print(f"By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(UpperCAmelCase__, UpperCAmelCase__ )}" )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 215
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __lowerCAmelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ) -> Union[str, Any]:
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
if not ops[op](version.parse(UpperCAmelCase__ ) , version.parse(UpperCAmelCase__ ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def __lowerCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> None:
lowerCamelCase_ = F'''\n{hint}''' if hint is not None else """"""
# non-versioned check
if re.match(R"""^[\w_\-\d]+$""" , UpperCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = requirement, None, None
else:
lowerCamelCase_ = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , UpperCAmelCase__ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
F''' got {requirement}''' )
lowerCamelCase_ , lowerCamelCase_ = match[0]
lowerCamelCase_ = want_full.split(""",""" ) # there could be multiple requirements
lowerCamelCase_ = {}
for w in want_range:
lowerCamelCase_ = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , UpperCAmelCase__ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
F''' but got {requirement}''' )
lowerCamelCase_ , lowerCamelCase_ = match[0]
lowerCamelCase_ = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
lowerCamelCase_ = """.""".join([str(UpperCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return
# check if any version is installed
try:
lowerCamelCase_ = importlib.metadata.version(UpperCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCAmelCase ( UpperCAmelCase__ : List[str] ) -> Dict:
lowerCamelCase_ = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(UpperCAmelCase__ , UpperCAmelCase__ )
| 272
| 0
|
def UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
'''simple docstring'''
_A= ''
for word_or_phrase in separated:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise Exception('join() accepts only strings to be joined' )
joined += word_or_phrase + separator
return joined.strip(lowerCAmelCase_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 476
|
from jiwer import compute_measures
import datasets
UpperCAmelCase_ = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
UpperCAmelCase_ = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
UpperCAmelCase_ = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def a__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
def a__ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False ):
if concatenate_texts:
return compute_measures(lowerCAmelCase__ , lowerCAmelCase__ )["wer"]
else:
_A= 0
_A= 0
for prediction, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_A= compute_measures(lowerCAmelCase__ , lowerCAmelCase__ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 476
| 1
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
UpperCamelCase = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
UpperCamelCase = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
UpperCamelCase = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
return float((preds == labels).mean() )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Optional[int] = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : int = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : Optional[Any] = float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )
_lowercase : int = float(spearmanr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def __a ( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowerCAmelCase , _lowerCAmelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowerCAmelCase , _lowerCAmelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowerCAmelCase , _lowerCAmelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 66
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
| 1
|
from PIL import Image
def A_ ( __a : Image ):
"""simple docstring"""
a__ , a__ = image.size
a__ = 0
a__ = image.load()
for i in range(__a ):
for j in range(__a ):
a__ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__a ):
for i in range(__a ):
a__ = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
UpperCAmelCase = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 351
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __snake_case ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE):
'''simple docstring'''
@register_to_config
def __init__( self , a_ = 128 , a_ = 256 , a_ = 2_000.0 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = 64 , a_ = 2_048 , a_ = 0.1 , ):
super().__init__()
a__ = nn.Sequential(
nn.Linear(a_ , d_model * 4 , bias=a_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=a_ ) , nn.SiLU() , )
a__ = nn.Embedding(a_ , a_ )
a__ = False
a__ = nn.Linear(a_ , a_ , bias=a_ )
a__ = nn.Dropout(p=a_ )
a__ = nn.ModuleList()
for lyr_num in range(a_ ):
# FiLM conditional T5 decoder
a__ = DecoderLayer(d_model=a_ , d_kv=a_ , num_heads=a_ , d_ff=a_ , dropout_rate=a_ )
self.decoders.append(a_ )
a__ = TaLayerNorm(a_ )
a__ = nn.Dropout(p=a_ )
a__ = nn.Linear(a_ , a_ , bias=a_ )
def _a ( self , a_ , a_ ):
a__ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def _a ( self , a_ , a_ , a_ ):
a__ , a__ , a__ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
a__ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
a__ = self.conditioning_emb(a_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
a__ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
a__ = torch.broadcast_to(
torch.arange(a_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
a__ = self.position_encoding(a_ )
a__ = self.continuous_inputs_projection(a_ )
inputs += position_encodings
a__ = self.dropout(a_ )
# decoder: No padding present.
a__ = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
a__ = [(x, self.encoder_decoder_mask(a_ , a_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
a__ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
a__ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
a__ = lyr(
a_ , conditioning_emb=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )[0]
a__ = self.decoder_norm(a_ )
a__ = self.post_dropout(a_ )
a__ = self.spec_out(a_ )
return spec_out
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ , a_ , a_ , a_=1E-6 ):
super().__init__()
a__ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=a_ , d_kv=a_ , num_heads=a_ , dropout_rate=a_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=a_ , d_kv=a_ , num_heads=a_ , dropout_rate=a_ , layer_norm_epsilon=a_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=a_ , d_ff=a_ , dropout_rate=a_ , layer_norm_epsilon=a_ ) )
def _a ( self , a_ , a_=None , a_=None , a_=None , a_=None , a_=None , ):
a__ = self.layer[0](
a_ , conditioning_emb=a_ , attention_mask=a_ , )
if encoder_hidden_states is not None:
a__ = torch.where(encoder_attention_mask > 0 , 0 , -1E1_0 ).to(
encoder_hidden_states.dtype )
a__ = self.layer[1](
a_ , key_value_states=a_ , attention_mask=a_ , )
# Apply Film Conditional Feed Forward layer
a__ = self.layer[-1](a_ , a_ )
return (hidden_states,)
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ , a_ ):
super().__init__()
a__ = TaLayerNorm(a_ )
a__ = TaFiLMLayer(in_features=d_model * 4 , out_features=a_ )
a__ = Attention(query_dim=a_ , heads=a_ , dim_head=a_ , out_bias=a_ , scale_qk=a_ )
a__ = nn.Dropout(a_ )
def _a ( self , a_ , a_=None , a_=None , ):
# pre_self_attention_layer_norm
a__ = self.layer_norm(a_ )
if conditioning_emb is not None:
a__ = self.FiLMLayer(a_ , a_ )
# Self-attention block
a__ = self.attention(a_ )
a__ = hidden_states + self.dropout(a_ )
return hidden_states
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ , a_ , a_ ):
super().__init__()
a__ = Attention(query_dim=a_ , heads=a_ , dim_head=a_ , out_bias=a_ , scale_qk=a_ )
a__ = TaLayerNorm(a_ , eps=a_ )
a__ = nn.Dropout(a_ )
def _a ( self , a_ , a_=None , a_=None , ):
a__ = self.layer_norm(a_ )
a__ = self.attention(
a_ , encoder_hidden_states=a_ , attention_mask=attention_mask.squeeze(1 ) , )
a__ = hidden_states + self.dropout(a_ )
return layer_output
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ , a_ ):
super().__init__()
a__ = TaDenseGatedActDense(d_model=a_ , d_ff=a_ , dropout_rate=a_ )
a__ = TaFiLMLayer(in_features=d_model * 4 , out_features=a_ )
a__ = TaLayerNorm(a_ , eps=a_ )
a__ = nn.Dropout(a_ )
def _a ( self , a_ , a_=None ):
a__ = self.layer_norm(a_ )
if conditioning_emb is not None:
a__ = self.film(a_ , a_ )
a__ = self.DenseReluDense(a_ )
a__ = hidden_states + self.dropout(a_ )
return hidden_states
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ ):
super().__init__()
a__ = nn.Linear(a_ , a_ , bias=a_ )
a__ = nn.Linear(a_ , a_ , bias=a_ )
a__ = nn.Linear(a_ , a_ , bias=a_ )
a__ = nn.Dropout(a_ )
a__ = NewGELUActivation()
def _a ( self , a_ ):
a__ = self.act(self.wi_a(a_ ) )
a__ = self.wi_a(a_ )
a__ = hidden_gelu * hidden_linear
a__ = self.dropout(a_ )
a__ = self.wo(a_ )
return hidden_states
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_=1E-6 ):
super().__init__()
a__ = nn.Parameter(torch.ones(a_ ) )
a__ = eps
def _a ( self , a_ ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
a__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=a_ )
a__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
a__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __snake_case ( nn.Module):
'''simple docstring'''
def _a ( self , a_ ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(a_ , 3.0 )) ))
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ ):
super().__init__()
a__ = nn.Linear(a_ , out_features * 2 , bias=a_ )
def _a ( self , a_ , a_ ):
a__ = self.scale_bias(a_ )
a__ , a__ = torch.chunk(a_ , 2 , -1 )
a__ = x * (1 + scale) + shift
return x
| 351
| 1
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a__ = JsonDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_json_dataset(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = features.copy() if features else default_expected_features
a__ = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a__ = JsonDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_json_dataset(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Any ):
a__ = tmp_path / 'cache'
a__ = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
a__ = features.copy() if features else default_expected_features
a__ = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a__ = JsonDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
a__ = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
a__ = features.copy()
a__ = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a__ = tmp_path / 'cache'
a__ = JsonDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = JsonDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , split=__lowerCAmelCase ).read()
_check_json_dataset(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = jsonl_path
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = [jsonl_path]
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = JsonDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_json_dataset(__lowerCAmelCase , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any=("train",) ):
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for split in splits:
a__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a__ = JsonDatasetReader({'train': jsonl_path} , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_json_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = features.copy() if features else default_expected_features
a__ = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a__ = JsonDatasetReader({'train': jsonl_path} , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_json_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
if split:
a__ = {split: jsonl_path}
else:
a__ = 'train'
a__ = {'train': jsonl_path, 'test': jsonl_path}
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = JsonDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_json_datasetdict(__lowerCAmelCase , __lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
return json.load(__lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple ):
return [json.loads(__lowerCAmelCase ) for line in buffer]
class snake_case_ :
@pytest.mark.parametrize('lines, load_json_function' ,[(True, load_json_lines), (False, load_json)] )
def lowerCamelCase__( self :List[str] ,__snake_case :str ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] ) -> Tuple:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case ,__snake_case ,lines=__snake_case ).write()
buffer.seek(0 )
a__ = load_json_function(__snake_case )
assert isinstance(__snake_case ,__snake_case )
assert isinstance(exported_content[0] ,__snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' ,[
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] ,)
def lowerCamelCase__( self :Any ,__snake_case :Any ,__snake_case :Dict ,__snake_case :Any ,__snake_case :str ,__snake_case :Optional[Any] ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case ,__snake_case ,lines=__snake_case ,orient=__snake_case ).write()
buffer.seek(0 )
a__ = load_json(__snake_case )
assert isinstance(__snake_case ,__snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case ,'keys' ) and not hasattr(exported_content[0] ,'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('lines, load_json_function' ,[(True, load_json_lines), (False, load_json)] )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Dict ,__snake_case :Optional[Any] ,__snake_case :int ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case ,__snake_case ,lines=__snake_case ,num_proc=2 ).write()
buffer.seek(0 )
a__ = load_json_function(__snake_case )
assert isinstance(__snake_case ,__snake_case )
assert isinstance(exported_content[0] ,__snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' ,[
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] ,)
def lowerCamelCase__( self :int ,__snake_case :str ,__snake_case :Dict ,__snake_case :Tuple ,__snake_case :Optional[int] ,__snake_case :Any ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case ,__snake_case ,lines=__snake_case ,orient=__snake_case ,num_proc=2 ).write()
buffer.seek(0 )
a__ = load_json(__snake_case )
assert isinstance(__snake_case ,__snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case ,'keys' ) and not hasattr(exported_content[0] ,'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def lowerCamelCase__( self :Optional[int] ,__snake_case :Any ) -> Any:
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case ,__snake_case ,num_proc=0 )
@pytest.mark.parametrize('compression, extension' ,[('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowerCamelCase__( self :List[Any] ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any] ,__snake_case :Dict ,__snake_case :Dict ,__snake_case :str ) -> List[str]:
a__ = tmp_path_factory.mktemp('data' ) / F'test.json.{extension}'
a__ = str(shared_datadir / F'test_file.json.{extension}' )
JsonDatasetWriter(__snake_case ,__snake_case ,compression=__snake_case ).write()
with fsspec.open(__snake_case ,'rb' ,compression='infer' ) as f:
a__ = f.read()
with fsspec.open(__snake_case ,'rb' ,compression='infer' ) as f:
a__ = f.read()
assert exported_content == original_content
| 335
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
snake_case : str = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
snake_case : str = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
snake_case : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ):
return float((preds == labels).mean() )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
a__ = simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )
a__ = float(fa_score(y_true=__lowerCAmelCase , y_pred=__lowerCAmelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : str ):
a__ = float(pearsonr(__lowerCAmelCase , __lowerCAmelCase )[0] )
a__ = float(spearmanr(__lowerCAmelCase , __lowerCAmelCase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :str ) -> Any:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)
def lowerCamelCase__( self :List[Any] ,__snake_case :str ,__snake_case :List[str] ) -> Optional[Any]:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__snake_case ,__snake_case )}
elif self.config_name == "stsb":
return pearson_and_spearman(__snake_case ,__snake_case )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__snake_case ,__snake_case )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__snake_case ,__snake_case )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 335
| 1
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = PriorTransformer
__magic_name__ = """hidden_states"""
@property
def _lowerCamelCase ( self ) -> Dict:
_A : Union[str, Any] = 4
_A : List[Any] = 8
_A : Optional[Any] = 7
_A : int = floats_tensor((batch_size, embedding_dim) ).to(__A )
_A : Union[str, Any] = floats_tensor((batch_size, embedding_dim) ).to(__A )
_A : Optional[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCamelCase ( self , UpperCAmelCase__=0 ) -> str:
torch.manual_seed(__A )
_A : List[str] = 4
_A : int = 8
_A : str = 7
_A : Any = torch.randn((batch_size, embedding_dim) ).to(__A )
_A : int = torch.randn((batch_size, embedding_dim) ).to(__A )
_A : int = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowerCamelCase ( self ) -> int:
return (4, 8)
@property
def _lowerCamelCase ( self ) -> Dict:
return (4, 8)
def _lowerCamelCase ( self ) -> List[str]:
_A : int = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
_A : str = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ) -> str:
_A , _A : Tuple = PriorTransformer.from_pretrained(
'''hf-internal-testing/prior-dummy''' , output_loading_info=__A )
self.assertIsNotNone(__A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__A )
_A : List[Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowerCamelCase ( self ) -> List[Any]:
_A , _A : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
_A : Any = self.model_class(**__A )
_A : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : Dict = [*signature.parameters.keys()]
_A : Any = ['''hidden_states''', '''timestep''']
self.assertListEqual(arg_names[:2] , __A )
def _lowerCamelCase ( self ) -> int:
_A : int = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
_A : Optional[int] = model.to(__A )
if hasattr(__A , '''set_default_attn_processor''' ):
model.set_default_attn_processor()
_A : Optional[int] = self.get_dummy_seed_input()
with torch.no_grad():
_A : List[Any] = model(**__A )[0]
_A : str = output[0, :5].flatten().cpu()
print(__A )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
_A : List[Any] = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(__A , __A , rtol=1e-2 ) )
@slow
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self , UpperCAmelCase__=1 , UpperCAmelCase__=7_6_8 , UpperCAmelCase__=7_7 , UpperCAmelCase__=0 ) -> Any:
torch.manual_seed(__A )
_A : Optional[int] = batch_size
_A : Any = embedding_dim
_A : Optional[Any] = num_embeddings
_A : Dict = torch.randn((batch_size, embedding_dim) ).to(__A )
_A : List[str] = torch.randn((batch_size, embedding_dim) ).to(__A )
_A : Optional[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCamelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[3_7, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
_A : Optional[Any] = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
model.to(__A )
_A : Any = self.get_dummy_seed_input(seed=__A )
with torch.no_grad():
_A : Tuple = model(**__A )[0]
assert list(sample.shape ) == [1, 7_6_8]
_A : int = sample[0, :8].flatten().cpu()
print(__A )
_A : Dict = torch.tensor(__A )
assert torch_all_close(__A , __A , atol=1e-3 )
| 718
|
'''simple docstring'''
def lowercase ( lowerCAmelCase : int = 100_0000):
"""simple docstring"""
_A : Any = 1
_A : str = 1
_A : Dict = {1: 1}
for inputa in range(2 , lowerCAmelCase):
_A : Any = 0
_A : str = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_A : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_A : Dict = counter
if counter > pre_counter:
_A : List[Any] = inputa
_A : str = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 417
| 0
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_lowerCamelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class _snake_case (unittest.TestCase , __SCREAMING_SNAKE_CASE):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = load_tool("text-question-answering" )
self.tool.setup()
UpperCAmelCase_ : Any = load_tool("text-question-answering" ,remote=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.tool(_snake_case ,"What did Hugging Face do in April 2021?" )
self.assertEqual(_snake_case ,"launched the BigScience Research Workshop" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.remote_tool(_snake_case ,"What did Hugging Face do in April 2021?" )
self.assertEqual(_snake_case ,"launched the BigScience Research Workshop" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = self.tool(text=_snake_case ,question="What did Hugging Face do in April 2021?" )
self.assertEqual(_snake_case ,"launched the BigScience Research Workshop" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.remote_tool(text=_snake_case ,question="What did Hugging Face do in April 2021?" )
self.assertEqual(_snake_case ,"launched the BigScience Research Workshop" )
| 71
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
__snake_case = 1
@register_to_config
def __init__( self : int , lowerCAmelCase_ : int = 10_00 , lowerCAmelCase_ : Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
'''simple docstring'''
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(lowerCAmelCase_ )
# standard deviation of the initial noise distribution
A__ : Union[str, Any] =1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
A__ : str =4
# running values
A__ : Optional[int] =[]
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, torch.device] = None ) -> Tuple:
'''simple docstring'''
A__ : int =num_inference_steps
A__ : str =torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
A__ : Optional[int] =torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
A__ : Tuple =torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
A__ : Optional[Any] =torch.sin(steps * math.pi / 2 ) ** 2
A__ : Optional[Any] =(1.0 - self.betas**2) ** 0.5
A__ : Union[str, Any] =(torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
A__ : str =timesteps.to(lowerCAmelCase_ )
A__ : str =[]
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : int , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
A__ : Optional[int] =(self.timesteps == timestep).nonzero().item()
A__ : List[str] =timestep_index + 1
A__ : List[Any] =sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowerCAmelCase_ )
if len(self.ets ) == 1:
A__ : Union[str, Any] =self.ets[-1]
elif len(self.ets ) == 2:
A__ : Union[str, Any] =(3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
A__ : int =(23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
A__ : Dict =(1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
A__ : str =self._get_prev_sample(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : int ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def lowercase__ ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
A__ : Tuple =self.alphas[timestep_index]
A__ : List[Any] =self.betas[timestep_index]
A__ : int =self.alphas[prev_timestep_index]
A__ : List[str] =self.betas[prev_timestep_index]
A__ : int =(sample - sigma * ets) / max(lowerCAmelCase_ , 1e-8 )
A__ : Dict =next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : str ) -> Optional[int]:
'''simple docstring'''
return self.config.num_train_timesteps
| 215
| 0
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__a = TypeVar("KEY")
__a = TypeVar("VAL")
@dataclass(frozen=UpperCamelCase_ , slots=UpperCamelCase_ )
class lowerCamelCase ( Generic[KEY, VAL] ):
'''simple docstring'''
_A : Optional[int] = 4_2
_A : Tuple = 4_2
class lowerCamelCase ( _Item ):
'''simple docstring'''
def __init__( self: Tuple ) -> str:
super().__init__(__A , __A )
def __bool__( self: Dict ) -> Optional[Any]:
return False
__a = _DeletedItem()
class lowerCamelCase ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self: Optional[Any] , snake_case: int = 8 , snake_case: float = 0.7_5 ) -> Optional[int]:
snake_case_ :Dict = initial_block_size
snake_case_ :list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ :Optional[Any] = capacity_factor
snake_case_ :str = 0
def lowerCAmelCase_ ( self: List[str] , snake_case: KEY ) -> List[str]:
return hash(__A ) % len(self._buckets )
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int ) -> Tuple:
return (ind + 1) % len(self._buckets )
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: KEY , snake_case: VAL ) -> str:
snake_case_ :Tuple = self._buckets[ind]
if not stored:
snake_case_ :List[Any] = _Item(__A , __A )
self._len += 1
return True
elif stored.key == key:
snake_case_ :List[str] = _Item(__A , __A )
return True
else:
return False
def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]:
snake_case_ :Optional[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__A )
def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]:
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ :Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowerCAmelCase_ ( self: Optional[int] , snake_case: int ) -> List[str]:
snake_case_ :Optional[Any] = self._buckets
snake_case_ :Optional[int] = [None] * new_size
snake_case_ :Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowerCAmelCase_ ( self: int ) -> List[str]:
self._resize(len(self._buckets ) * 2 )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> int:
self._resize(len(self._buckets ) // 2 )
def lowerCAmelCase_ ( self: int , snake_case: KEY ) -> Optional[int]:
snake_case_ :List[Any] = self._get_bucket_index(__A )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ :int = self._get_next_ind(__A )
def lowerCAmelCase_ ( self: Tuple , snake_case: KEY , snake_case: VAL ) -> int:
for ind in self._iterate_buckets(__A ):
if self._try_set(__A , __A , __A ):
break
def __setitem__( self: Union[str, Any] , snake_case: KEY , snake_case: VAL ) -> Dict:
if self._is_full():
self._size_up()
self._add_item(__A , __A )
def __delitem__( self: List[str] , snake_case: KEY ) -> Any:
for ind in self._iterate_buckets(__A ):
snake_case_ :str = self._buckets[ind]
if item is None:
raise KeyError(__A )
if item is _deleted:
continue
if item.key == key:
snake_case_ :str = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self: str , snake_case: KEY ) -> int:
for ind in self._iterate_buckets(__A ):
snake_case_ :Dict = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__A )
def __len__( self: int ) -> List[str]:
return self._len
def __iter__( self: List[str] ) -> Optional[Any]:
yield from (item.key for item in self._buckets if item)
def __repr__( self: str ) -> int:
snake_case_ :Union[str, Any] = " ,".join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 703
|
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__a = logging.get_logger(__name__)
class lowerCamelCase :
'''simple docstring'''
_A : Union[str, Any] = None
@experimental
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
return _map_with_joblib(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[str] = num_proc if num_proc <= len(_lowercase ) else len(_lowercase )
snake_case_ :int = [] # We organize the splits ourselve (contiguous splits)
for index in range(_lowercase ):
snake_case_ :List[str] = len(_lowercase ) // num_proc
snake_case_ :Any = len(_lowercase ) % num_proc
snake_case_ :Optional[int] = div * index + min(_lowercase, _lowercase )
snake_case_ :Union[str, Any] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(_lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"""Error dividing inputs iterable among processes. """
f"""Total number of objects {len(_lowercase )}, """
f"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
f"""Spawning {num_proc} processes for {len(_lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
snake_case_, snake_case_ :Optional[int] = None, None
if not disable_tqdm:
snake_case_, snake_case_ :List[str] = (RLock(),), tqdm.set_lock
with Pool(_lowercase, initargs=_lowercase, initializer=_lowercase ) as pool:
snake_case_ :Optional[Any] = pool.map(_lowercase, _lowercase )
logger.info(f"""Finished {num_proc} processes""" )
snake_case_ :Optional[int] = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"""Unpacked {len(_lowercase )} objects""" )
return mapped
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=_lowercase ):
return joblib.Parallel()(
joblib.delayed(_lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Dict = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
snake_case_ :Optional[int] = None
| 310
| 0
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class a_ ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
def __init__( self , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(features=_SCREAMING_SNAKE_CASE )
UpperCamelCase = torch_tensor_kwargs
import torch # noqa import torch at initialization
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
import torch
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(_SCREAMING_SNAKE_CASE )
return column
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
import torch
if isinstance(_SCREAMING_SNAKE_CASE , (str, bytes, type(_SCREAMING_SNAKE_CASE )) ):
return value
elif isinstance(_SCREAMING_SNAKE_CASE , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCamelCase = {}
if isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
UpperCamelCase = {"""dtype""": torch.intaa}
elif isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCamelCase = {"""dtype""": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE )
return torch.tensor(_SCREAMING_SNAKE_CASE , **{**default_dtype, **self.torch_tensor_kwargs} )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(_SCREAMING_SNAKE_CASE , """__array__""" ) and not isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
UpperCamelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
return self._tensorize(_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return map_nested(self._recursive_tensorize , _SCREAMING_SNAKE_CASE , map_list=_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Mapping:
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_row(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_row(_SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> "torch.Tensor":
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_column(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_column(_SCREAMING_SNAKE_CASE , pa_table.column_names[0] )
UpperCamelCase = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._consolidate(_SCREAMING_SNAKE_CASE )
return column
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Mapping:
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_batch(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_batch(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
for column_name in batch:
UpperCamelCase = self._consolidate(batch[column_name] )
return batch
| 301
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase__ ( __UpperCamelCase )-> int:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__UpperCamelCase ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def lowercase__ ( __UpperCamelCase )-> List[str]:
UpperCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
UpperCamelCase = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
UpperCamelCase = PipelineDataFormat.from_str(
format=__UpperCamelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__UpperCamelCase , __UpperCamelCase )
class a_ ( lowerCamelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase = nlp
UpperCamelCase = reader
@staticmethod
def A__ ( _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=_SCREAMING_SNAKE_CASE , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=_SCREAMING_SNAKE_CASE , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=_SCREAMING_SNAKE_CASE , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=_SCREAMING_SNAKE_CASE , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=_SCREAMING_SNAKE_CASE , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=_SCREAMING_SNAKE_CASE , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=_SCREAMING_SNAKE_CASE , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=_SCREAMING_SNAKE_CASE , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self._nlp, []
for entry in self._reader:
UpperCamelCase = nlp(**_SCREAMING_SNAKE_CASE ) if self._reader.is_multi_columns else nlp(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
outputs.append(_SCREAMING_SNAKE_CASE )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
UpperCamelCase = self._reader.save_binary(_SCREAMING_SNAKE_CASE )
logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(_SCREAMING_SNAKE_CASE )
| 301
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _a ( _UpperCAmelCase ):
'''simple docstring'''
lowerCamelCase_ : Dict = 'vivit'
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=32 , __UpperCAmelCase=[2, 16, 16] , __UpperCAmelCase=3 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3_072 , __UpperCAmelCase="gelu_fast" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-06 , __UpperCAmelCase=True , **__UpperCAmelCase , ):
__A : Tuple = hidden_size
__A : Tuple = num_hidden_layers
__A : Any = num_attention_heads
__A : str = intermediate_size
__A : Any = hidden_act
__A : int = hidden_dropout_prob
__A : List[str] = attention_probs_dropout_prob
__A : Union[str, Any] = initializer_range
__A : Tuple = layer_norm_eps
__A : Dict = image_size
__A : Any = num_frames
__A : Optional[int] = tubelet_size
__A : Optional[Any] = num_channels
__A : Dict = qkv_bias
super().__init__(**__UpperCamelCase )
| 719
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'spiece.model'}
UpperCamelCase = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCamelCase = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
UpperCamelCase = '▁'
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Tuple = VOCAB_FILES_NAMES
lowerCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase=100 , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase=True , **__UpperCAmelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__A : Dict = [F"<extra_id_{i}>" for i in range(__UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__A : Any = len(set(filter(lambda __UpperCAmelCase : bool("extra_id" in str(__UpperCAmelCase ) ) , __UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
__A : Tuple = legacy
__A : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=__UpperCAmelCase , **__UpperCAmelCase , )
__A : Optional[Any] = vocab_file
__A : List[str] = extra_ids
__A : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@staticmethod
def __UpperCAmelCase( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__A : Union[str, Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , __UpperCAmelCase , )
return max_model_length
@property
def __UpperCAmelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCAmelCase( self ):
__A : List[str] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__UpperCAmelCase )) + [1]
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
def __UpperCAmelCase( self ):
return list(
set(filter(lambda __UpperCAmelCase : bool(re.search(r"<extra_id_\d+>" , __UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def __UpperCAmelCase( self ):
return [self._convert_token_to_id(__UpperCAmelCase ) for token in self.get_sentinel_tokens()]
def __UpperCAmelCase( self , __UpperCAmelCase ):
if len(__UpperCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None ):
__A : List[str] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None ):
__A : List[Any] = self._add_eos_if_not_present(__UpperCAmelCase )
if token_ids_a is None:
return token_ids_a
else:
__A : str = self._add_eos_if_not_present(__UpperCAmelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
__A : Any = self.__dict__.copy()
__A : List[str] = None
return state
def __setstate__( self , __UpperCAmelCase ):
__A : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__A : Optional[int] = {}
__A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase( self , __UpperCAmelCase , **__UpperCAmelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
__A : List[str] = SPIECE_UNDERLINE + text.replace(__UpperCAmelCase , " " )
return super().tokenize(__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase , **__UpperCAmelCase ):
if not self.legacy:
__A : Tuple = text.startswith(__UpperCAmelCase )
if is_first:
__A : Optional[int] = text[1:]
__A : Optional[Any] = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(__UpperCAmelCase ):
__A : Tuple = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def __UpperCAmelCase( self , __UpperCAmelCase ):
if token.startswith("<extra_id_" ):
__A : Optional[Any] = re.match(r"<extra_id_(\d+)>" , __UpperCAmelCase )
__A : Optional[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase ):
if index < self.sp_model.get_piece_size():
__A : Union[str, Any] = self.sp_model.IdToPiece(__UpperCAmelCase )
else:
__A : List[Any] = F"<extra_id_{self.vocab_size - 1 - index}>"
return token
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : int = []
__A : List[Any] = ""
__A : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
__A : Tuple = True
__A : Any = []
else:
current_sub_tokens.append(__UpperCAmelCase )
__A : Optional[int] = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__A : Union[str, Any] = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , "wb" ) as fi:
__A : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 387
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class A ( unittest.TestCase ):
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any]=7 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Union[str, Any]=18 , __magic_name__ : Optional[int]=30 , __magic_name__ : int=400 , __magic_name__ : Any=True , __magic_name__ : Tuple=None , __magic_name__ : Optional[int]=True , __magic_name__ : str=None , __magic_name__ : Optional[Any]=True , ):
"""simple docstring"""
lowerCAmelCase__ = size if size is not None else {"shortest_edge": 20}
lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = image_size
lowerCAmelCase__ = min_resolution
lowerCAmelCase__ = max_resolution
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_flip_channel_order
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Dict = MobileViTImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = MobileViTImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , "do_resize" ) )
self.assertTrue(hasattr(__magic_name__ , "size" ) )
self.assertTrue(hasattr(__magic_name__ , "do_center_crop" ) )
self.assertTrue(hasattr(__magic_name__ , "center_crop" ) )
self.assertTrue(hasattr(__magic_name__ , "do_flip_channel_order" ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 48
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
if start < end:
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 )
count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ )
return count
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(UpperCamelCase_ , UpperCamelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
UpperCAmelCase__ : Tuple = TemporaryFile()
UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted
UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation
UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase__ : Optional[Any] = np.load(outfile)
UpperCAmelCase__ : Any = len(M) - 1
UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 48
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase__ = None
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase__ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
lowercase__ = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
lowercase__ = """▁"""
class __snake_case ( _lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['input_ids', 'attention_mask']
a__ = BarthezTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , **lowercase , ) -> int:
'''simple docstring'''
a__: List[str] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase) if isinstance(_lowerCAmelCase , _lowerCAmelCase) else mask_token
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
a__: int = vocab_file
a__: List[str] = False if not self.vocab_file else True
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__: Dict = [self.cls_token_id]
a__: int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: str = [self.sep_token_id]
a__: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_lowerCAmelCase):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
a__: Union[str, Any] = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCAmelCase):
copyfile(self.vocab_file , _lowerCAmelCase)
return (out_vocab_file,)
| 709
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __snake_case ( __lowerCAmelCase ):
a__ = """speech_to_text"""
a__ = ["""past_key_values"""]
a__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowercase=1_00_00 , lowercase=12 , lowercase=20_48 , lowercase=4 , lowercase=6 , lowercase=20_48 , lowercase=4 , lowercase=0.0 , lowercase=0.0 , lowercase=True , lowercase=True , lowercase="relu" , lowercase=2_56 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=2 , lowercase=True , lowercase=1 , lowercase=0 , lowercase=2 , lowercase=60_00 , lowercase=10_24 , lowercase=2 , lowercase=(5, 5) , lowercase=10_24 , lowercase=80 , lowercase=1 , **lowercase , ) -> List[str]:
'''simple docstring'''
a__: int = vocab_size
a__: Any = d_model
a__: List[str] = encoder_ffn_dim
a__: int = encoder_layers
a__: int = encoder_attention_heads
a__: int = decoder_ffn_dim
a__: Optional[int] = decoder_layers
a__: Optional[Any] = decoder_attention_heads
a__: str = dropout
a__: List[Any] = attention_dropout
a__: Union[str, Any] = activation_dropout
a__: Tuple = activation_function
a__: Optional[Any] = init_std
a__: List[str] = encoder_layerdrop
a__: Optional[int] = decoder_layerdrop
a__: Union[str, Any] = use_cache
a__: Union[str, Any] = encoder_layers
a__: str = scale_embedding # scale factor will be sqrt(d_model) if True
a__: Tuple = max_source_positions
a__: Union[str, Any] = max_target_positions
a__: List[str] = num_conv_layers
a__: Union[str, Any] = list(lowercase)
a__: Dict = conv_channels
a__: List[Any] = input_feat_per_channel
a__: Any = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '
f'`config.num_conv_layers = {self.num_conv_layers}`.')
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , **lowercase , )
| 217
| 0
|
from manim import *
class lowercase ( A__ ):
'''simple docstring'''
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
UpperCAmelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
UpperCAmelCase = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
UpperCAmelCase = Text('''CPU''' , font_size=24 )
UpperCAmelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_snake_case )
UpperCAmelCase = [mem.copy() for i in range(1 )]
UpperCAmelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
UpperCAmelCase = Text('''GPU''' , font_size=24 )
UpperCAmelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
gpu.align_to(_snake_case , _snake_case )
gpu.set_x(gpu.get_x() - 1 )
self.add(_snake_case )
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
UpperCAmelCase = Text('''Model''' , font_size=24 )
UpperCAmelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
model.move_to([3, -1.0, 0] )
self.play(
Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , )
UpperCAmelCase = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_snake_case , run_time=2.5 ) , Write(_snake_case ) , Write(_snake_case ) )
self.add(_snake_case )
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for i, rect in enumerate(_snake_case ):
UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 )
cpu_target.move_to(_snake_case )
cpu_target.generate_target()
UpperCAmelCase = 0.46 / 4
UpperCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_snake_case , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_snake_case , buff=0.0 )
cpu_targs.append(_snake_case )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_snake_case ) )
second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) )
self.play(*_snake_case )
self.play(*_snake_case )
self.wait()
| 254
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase ( A__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """encodec"""
def __init__( self , _snake_case=[1.5, 3.0, 6.0, 12.0, 24.0] , _snake_case=2_4000 , _snake_case=1 , _snake_case=False , _snake_case=None , _snake_case=None , _snake_case=128 , _snake_case=32 , _snake_case=1 , _snake_case=[8, 5, 4, 2] , _snake_case="weight_norm" , _snake_case=7 , _snake_case=7 , _snake_case=3 , _snake_case=2 , _snake_case=True , _snake_case="reflect" , _snake_case=2 , _snake_case=2 , _snake_case=1.0 , _snake_case=1024 , _snake_case=None , _snake_case=True , **_snake_case , ) -> Dict:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_snake_case )
@property
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def snake_case_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def snake_case_ ( self ) -> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 254
| 1
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] = 16
_lowercase : Optional[Any] = 32
def lowercase__ ( snake_case_ :Accelerator , snake_case_ :int = 16 , snake_case_ :str = "bert-base-cased" ):
__UpperCAmelCase = AutoTokenizer.from_pretrained(a_ )
__UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ :Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=a_ , max_length=a_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCAmelCase = datasets.map(
a_ , batched=a_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=a_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ :str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(a_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
__UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
return train_dataloader, eval_dataloader
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Optional[int] , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
model.eval()
__UpperCAmelCase = 0
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase = model(**a_ )
__UpperCAmelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__UpperCAmelCase = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(a_ ) - 1:
__UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=a_ , references=a_ , )
__UpperCAmelCase = metric.compute()
return eval_metric["accuracy"]
def lowercase__ ( snake_case_ :Tuple , snake_case_ :int ):
# Initialize accelerator
__UpperCAmelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase = config['''lr''']
__UpperCAmelCase = int(config['''num_epochs'''] )
__UpperCAmelCase = int(config['''seed'''] )
__UpperCAmelCase = int(config['''batch_size'''] )
__UpperCAmelCase = args.model_name_or_path
set_seed(a_ )
__UpperCAmelCase = get_dataloaders(a_ , a_ , a_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_ )
# Instantiate optimizer
__UpperCAmelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__UpperCAmelCase = optimizer_cls(params=model.parameters() , lr=a_ )
if accelerator.state.deepspeed_plugin is not None:
__UpperCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__UpperCAmelCase = 1
__UpperCAmelCase = (len(a_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , )
else:
__UpperCAmelCase = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# We need to keep track of how many total steps we have iterated over
__UpperCAmelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__UpperCAmelCase = 0
__UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
__UpperCAmelCase = num_epochs
if args.partial_train_epoch is not None:
__UpperCAmelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__UpperCAmelCase = args.resume_from_checkpoint.split('''epoch_''' )[1]
__UpperCAmelCase = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__UpperCAmelCase = int(a_ ) + 1
__UpperCAmelCase = evaluation_loop(a_ , a_ , a_ , a_ )
accelerator.print('''resumed checkpoint performance:''' , a_ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
__UpperCAmelCase = json.load(a_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__UpperCAmelCase = {}
for epoch in range(a_ , a_ ):
model.train()
for step, batch in enumerate(a_ ):
__UpperCAmelCase = model(**a_ )
__UpperCAmelCase = outputs.loss
__UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(a_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__UpperCAmelCase = F'''epoch_{epoch}'''
__UpperCAmelCase = os.path.join(args.output_dir , a_ )
accelerator.save_state(a_ )
__UpperCAmelCase = evaluation_loop(a_ , a_ , a_ , a_ )
__UpperCAmelCase = accuracy
__UpperCAmelCase = lr_scheduler.get_lr()[0]
__UpperCAmelCase = optimizer.param_groups[0]['''lr''']
__UpperCAmelCase = epoch
__UpperCAmelCase = overall_step
accelerator.print(F'''epoch {epoch}:''' , a_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=a_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=a_ , )
parser.add_argument(
'''--output_dir''' , type=a_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=a_ , default=a_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=a_ , default=a_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=a_ , default=2 , help='''Number of train epochs.''' , )
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(a_ , a_ )
if __name__ == "__main__":
main()
| 705
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : List[Any] = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 397
| 0
|
from __future__ import annotations
import numpy as np
def _lowerCAmelCase ( _lowerCAmelCase ) -> int:
'''simple docstring'''
return np.maximum(0 , _lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 371
|
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( _lowerCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
__snake_case = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
__snake_case = BeautifulSoup(requests.get(_lowerCAmelCase ).text , "html.parser" )
__snake_case = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 371
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
__A : List[str] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__A : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """A folder containing the training data."""})
UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """A folder containing the validation data."""})
UpperCamelCase__ = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""})
UpperCamelCase__ = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""})
UpperCamelCase__ = field(
default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowercase__ ( self : Union[str, Any] )->Union[str, Any]:
_UpperCAmelCase = {}
if self.train_dir is not None:
_UpperCAmelCase = self.train_dir
if self.validation_dir is not None:
_UpperCAmelCase = self.validation_dir
_UpperCAmelCase = data_files if data_files else None
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
"""checkpoint identifier on the hub. """
"""Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowerCAmelCase)} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
UpperCamelCase__ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""})
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
)
} , )
UpperCamelCase__ = field(
default=lowerCAmelCase , metadata={"""help""": """Stride to use for the encoder."""} , )
class _a :
"""simple docstring"""
def __init__( self : Dict , __UpperCamelCase : Tuple=1_9_2 , __UpperCamelCase : Optional[Any]=3_2 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : Tuple=0.6 )->Optional[int]:
_UpperCAmelCase = input_size
_UpperCAmelCase = mask_patch_size
_UpperCAmelCase = model_patch_size
_UpperCAmelCase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('''Input size must be divisible by mask patch size''' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('''Mask patch size must be divisible by model patch size''' )
_UpperCAmelCase = self.input_size // self.mask_patch_size
_UpperCAmelCase = self.mask_patch_size // self.model_patch_size
_UpperCAmelCase = self.rand_size**2
_UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Dict )->List[str]:
_UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count]
_UpperCAmelCase = np.zeros(self.token_count , dtype=__UpperCamelCase )
_UpperCAmelCase = 1
_UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) )
_UpperCAmelCase = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = torch.stack([example['''pixel_values'''] for example in examples] )
_UpperCAmelCase = torch.stack([example['''mask'''] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
_UpperCAmelCase = ds['''train'''].train_test_split(data_args.train_val_split )
_UpperCAmelCase = split['''train''']
_UpperCAmelCase = split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_SCREAMING_SNAKE_CASE , '''decoder_type''' ):
_UpperCAmelCase = '''simmim'''
# adapt config
_UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size
_UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
_UpperCAmelCase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
_UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
_UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_SCREAMING_SNAKE_CASE )
if training_args.do_train:
_UpperCAmelCase = ds['''train'''].column_names
else:
_UpperCAmelCase = ds['''validation'''].column_names
if data_args.image_column_name is not None:
_UpperCAmelCase = data_args.image_column_name
elif "image" in column_names:
_UpperCAmelCase = '''image'''
elif "img" in column_names:
_UpperCAmelCase = '''img'''
else:
_UpperCAmelCase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
_UpperCAmelCase = Compose(
[
Lambda(lambda _SCREAMING_SNAKE_CASE : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
_UpperCAmelCase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(_SCREAMING_SNAKE_CASE : Dict ):
_UpperCAmelCase = [transforms(_SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]]
_UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
_UpperCAmelCase = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
_UpperCAmelCase = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_SCREAMING_SNAKE_CASE )
# Initialize our trainer
_UpperCAmelCase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
_UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 716
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = SwinConfig()
_UpperCAmelCase = swin_name.split('''_''' )
_UpperCAmelCase = name_split[1]
_UpperCAmelCase = int(name_split[4] )
_UpperCAmelCase = int(name_split[3][-1] )
if model_size == "tiny":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 6, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "small":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "base":
_UpperCAmelCase = 128
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (4, 8, 16, 32)
else:
_UpperCAmelCase = 192
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (6, 12, 24, 48)
if "in22k" in swin_name:
_UpperCAmelCase = 2_1841
else:
_UpperCAmelCase = 1000
_UpperCAmelCase = '''huggingface/label-files'''
_UpperCAmelCase = '''imagenet-1k-id2label.json'''
_UpperCAmelCase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
_UpperCAmelCase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = img_size
_UpperCAmelCase = num_classes
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
return config
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if "patch_embed.proj" in name:
_UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_UpperCAmelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
_UpperCAmelCase = '''encoder.''' + name
if "attn.proj" in name:
_UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_UpperCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
_UpperCAmelCase = '''layernorm.weight'''
if name == "norm.bias":
_UpperCAmelCase = '''layernorm.bias'''
if "head" in name:
_UpperCAmelCase = name.replace('''head''' , '''classifier''' )
else:
_UpperCAmelCase = '''swin.''' + name
return name
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
_UpperCAmelCase = key.split('''.''' )
_UpperCAmelCase = int(key_split[1] )
_UpperCAmelCase = int(key_split[3] )
_UpperCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[
dim : dim * 2, :
]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[
:dim
]
_UpperCAmelCase = val[
dim : dim * 2
]
_UpperCAmelCase = val[
-dim:
]
else:
_UpperCAmelCase = val
return orig_state_dict
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
_UpperCAmelCase = get_swin_config(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = SwinForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCAmelCase = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
_UpperCAmelCase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
_UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
_UpperCAmelCase = timm_model(inputs['''pixel_values'''] )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(f'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__A : Tuple = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 95
| 0
|
from __future__ import annotations
def _lowerCAmelCase ( UpperCamelCase__: list[int] ) -> list[int]:
"""simple docstring"""
if len(UpperCamelCase__ ) == 0:
return array
A , A = min(UpperCamelCase__ ), max(UpperCamelCase__ )
# Compute the variables
A = _max - _min + 1
A , A = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
A = i - _min
A = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
A = 0
for i in range(UpperCamelCase__ ):
while holes_repeat[i] > 0:
A = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Tuple = input("Enter numbers separated by comma:\n")
_lowercase : Optional[Any] = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 641
|
import numpy as np
from transformers import Pipeline
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
A = np.max(UpperCamelCase__ , axis=-1 , keepdims=UpperCamelCase__ )
A = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=UpperCamelCase__ )
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
def _UpperCAmelCase ( self , **a__ ) -> Union[str, Any]:
A = {}
if "second_text" in kwargs:
A = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def _UpperCAmelCase ( self , a__ , a__=None ) -> str:
return self.tokenizer(a__ , text_pair=a__ , return_tensors=self.framework )
def _UpperCAmelCase ( self , a__ ) -> Tuple:
return self.model(**a__ )
def _UpperCAmelCase ( self , a__ ) -> Optional[int]:
A = model_outputs.logits[0].numpy()
A = softmax(a__ )
A = np.argmax(a__ )
A = self.model.config.idalabel[best_class]
A = probabilities[best_class].item()
A = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 641
| 1
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCamelCase__ :
def __init__( self : str , UpperCamelCase : int , UpperCamelCase : Union[str, Any]=sys.maxsize ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''bilinear'''
__UpperCAmelCase : Optional[Any] = max_size
__UpperCAmelCase : List[Any] = short_edge_length
def __call__( self : int , UpperCamelCase : Dict ):
'''simple docstring'''
__UpperCAmelCase : Any = []
for img in imgs:
__UpperCAmelCase : Optional[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
__UpperCAmelCase : str = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
__UpperCAmelCase : int = size * 1.0 / min(UpperCamelCase , UpperCamelCase )
if h < w:
__UpperCAmelCase : Dict = size, scale * w
else:
__UpperCAmelCase : str = scale * h, size
if max(UpperCamelCase , UpperCamelCase ) > self.max_size:
__UpperCAmelCase : Any = self.max_size * 1.0 / max(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Tuple = newh * scale
__UpperCAmelCase : Any = neww * scale
__UpperCAmelCase : List[str] = int(neww + 0.5 )
__UpperCAmelCase : List[str] = int(newh + 0.5 )
if img.dtype == np.uinta:
__UpperCAmelCase : Optional[int] = Image.fromarray(UpperCamelCase )
__UpperCAmelCase : List[str] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
__UpperCAmelCase : Optional[Any] = np.asarray(UpperCamelCase )
else:
__UpperCAmelCase : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
__UpperCAmelCase : Union[str, Any] = nn.functional.interpolate(
UpperCamelCase , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase ).squeeze(0 )
img_augs.append(UpperCamelCase )
return img_augs
class lowerCamelCase__ :
def __init__( self : Tuple , UpperCamelCase : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
__UpperCAmelCase : Any = cfg.INPUT.FORMAT
__UpperCAmelCase : List[Any] = cfg.SIZE_DIVISIBILITY
__UpperCAmelCase : List[str] = cfg.PAD_VALUE
__UpperCAmelCase : Dict = cfg.INPUT.MAX_SIZE_TEST
__UpperCAmelCase : Optional[Any] = cfg.MODEL.DEVICE
__UpperCAmelCase : int = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__UpperCAmelCase : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__UpperCAmelCase : int = lambda UpperCamelCase : (x - self.pixel_mean) / self.pixel_std
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = tuple(max(UpperCamelCase ) for s in zip(*[img.shape for img in images] ) )
__UpperCAmelCase : Union[str, Any] = [im.shape[-2:] for im in images]
__UpperCAmelCase : Tuple = [
nn.functional.pad(
UpperCamelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase , UpperCamelCase )
]
return torch.stack(UpperCamelCase ), torch.tensor(UpperCamelCase )
def __call__( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(UpperCamelCase , UpperCamelCase ):
__UpperCAmelCase : Any = [images]
if single_image:
assert len(UpperCamelCase ) == 1
for i in range(len(UpperCamelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase , images.pop(UpperCamelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
__UpperCAmelCase : Union[str, Any] = torch.tensor([im.shape[:2] for im in images] )
__UpperCAmelCase : Dict = self.aug(UpperCamelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__UpperCAmelCase : List[str] = [self.normalizer(UpperCamelCase ) for x in images]
# now pad them to do the following operations
__UpperCAmelCase : Dict = self.pad(UpperCamelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__UpperCAmelCase : Union[str, Any] = torch.true_divide(UpperCamelCase , UpperCamelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Tuple[int, int] ) -> Union[str, Any]:
'''simple docstring'''
assert torch.isfinite(__A ).all(), "Box tensor contains infinite or NaN!"
__UpperCAmelCase : Optional[Any] = box_size
tensor[:, 0].clamp_(min=0 , max=__A )
tensor[:, 1].clamp_(min=0 , max=__A )
tensor[:, 2].clamp_(min=0 , max=__A )
tensor[:, 3].clamp_(min=0 , max=__A )
| 708
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase ( _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = 2
__UpperCAmelCase : Optional[Any] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_UpperCamelCase )
if n > 1:
factors.append(_UpperCamelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299
| 0
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE :Tuple = logging.get_logger(__name__)
def UpperCAmelCase_ ( __lowercase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
_UpperCAmelCase = key.replace("module.encoder" , "glpn.encoder" )
if key.startswith("module.decoder" ):
_UpperCAmelCase = key.replace("module.decoder" , "decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_UpperCAmelCase = key[key.find("patch_embed" ) + len("patch_embed" )]
_UpperCAmelCase = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(__lowercase )-1}' )
if "norm" in key:
_UpperCAmelCase = key.replace("norm" , "layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_UpperCAmelCase = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
_UpperCAmelCase = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(__lowercase )-1}' )
if "layer_norm1" in key:
_UpperCAmelCase = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
_UpperCAmelCase = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_UpperCAmelCase = key[key.find("block" ) + len("block" )]
_UpperCAmelCase = key.replace(f'block{idx}' , f'block.{int(__lowercase )-1}' )
if "attn.q" in key:
_UpperCAmelCase = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
_UpperCAmelCase = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
_UpperCAmelCase = key.replace("attn" , "attention.self" )
if "fc1" in key:
_UpperCAmelCase = key.replace("fc1" , "dense1" )
if "fc2" in key:
_UpperCAmelCase = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
_UpperCAmelCase = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
_UpperCAmelCase = key.replace("linear_fuse.conv" , "linear_fuse" )
_UpperCAmelCase = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_UpperCAmelCase = key[key.find("linear_c" ) + len("linear_c" )]
_UpperCAmelCase = key.replace(f'linear_c{idx}' , f'linear_c.{int(__lowercase )-1}' )
if "bot_conv" in key:
_UpperCAmelCase = key.replace("bot_conv" , "0.convolution" )
if "skip_conv1" in key:
_UpperCAmelCase = key.replace("skip_conv1" , "1.convolution" )
if "skip_conv2" in key:
_UpperCAmelCase = key.replace("skip_conv2" , "2.convolution" )
if "fusion1" in key:
_UpperCAmelCase = key.replace("fusion1" , "1.fusion" )
if "fusion2" in key:
_UpperCAmelCase = key.replace("fusion2" , "2.fusion" )
if "fusion3" in key:
_UpperCAmelCase = key.replace("fusion3" , "3.fusion" )
if "fusion" in key and "conv" in key:
_UpperCAmelCase = key.replace("conv" , "convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
_UpperCAmelCase = key.replace("module.last_layer_depth" , "head.head" )
_UpperCAmelCase = value
return new_state_dict
def UpperCAmelCase_ ( __lowercase : Any , __lowercase : Optional[int] ) -> List[str]:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_UpperCAmelCase = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
_UpperCAmelCase = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
_UpperCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
_UpperCAmelCase = kv_bias[: config.hidden_sizes[i]]
_UpperCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
_UpperCAmelCase = kv_bias[config.hidden_sizes[i] :]
def UpperCAmelCase_ ( ) -> int:
'''simple docstring'''
_UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return image
@torch.no_grad()
def UpperCAmelCase_ ( __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : Dict=False , __lowercase : List[Any]=None ) -> int:
'''simple docstring'''
_UpperCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_UpperCAmelCase = GLPNImageProcessor()
# prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__lowercase , return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
_UpperCAmelCase = torch.load(__lowercase , map_location=torch.device("cpu" ) )
# rename keys
_UpperCAmelCase = rename_keys(__lowercase )
# key and value matrices need special treatment
read_in_k_v(__lowercase , __lowercase )
# create HuggingFace model and load state dict
_UpperCAmelCase = GLPNForDepthEstimation(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
# forward pass
_UpperCAmelCase = model(__lowercase )
_UpperCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_UpperCAmelCase = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
_UpperCAmelCase = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(f'Unknown model name: {model_name}' )
_UpperCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , __lowercase , atol=1E-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(__lowercase , __lowercase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=__lowercase , )
image_processor.push_to_hub(
repo_path_or_name=Path(__lowercase , __lowercase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=__lowercase , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
__SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 236
|
'''simple docstring'''
from manim import *
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Dict ):
_UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase = Rectangle(height=0.2_5 , width=0.2_5 )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = Text("CPU" , font_size=2_4 )
_UpperCAmelCase = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
_UpperCAmelCase = [mem.copy() for i in range(4 )]
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = Text("GPU" , font_size=2_4 )
_UpperCAmelCase = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = Text("Model" , font_size=2_4 )
_UpperCAmelCase = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(snake_case_ ):
_UpperCAmelCase = fill.copy().set_fill(snake_case_ , opacity=0.8 )
target.move_to(snake_case_ )
model_arr.append(snake_case_ )
_UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ )
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
_UpperCAmelCase = Text("Disk" , font_size=2_4 )
_UpperCAmelCase = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
disk.move_to([-4, -1.2_5, 0] )
self.add(snake_case_ , snake_case_ )
_UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ , snake_case_ )
_UpperCAmelCase = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , )
blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case_ )
_UpperCAmelCase = MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ ) )
_UpperCAmelCase = Square(0.3 )
input.set_fill(snake_case_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , snake_case_ , buff=0.5 )
self.play(Write(snake_case_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=snake_case_ , buff=0.0_2 )
self.play(MoveToTarget(snake_case_ ) )
self.play(FadeOut(snake_case_ ) )
_UpperCAmelCase = Arrow(start=snake_case_ , end=snake_case_ , color=snake_case_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , snake_case_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_UpperCAmelCase = MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) )
_UpperCAmelCase = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(snake_case_ ) , Circumscribe(model_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_cpu_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_UpperCAmelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , snake_case_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
_UpperCAmelCase = AnimationGroup(
FadeOut(snake_case_ , run_time=0.5 ) , MoveToTarget(snake_case_ , run_time=0.5 ) , FadeIn(snake_case_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(snake_case_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_UpperCAmelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_arr[i + 1] , color=snake_case_ , **snake_case_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(cpu_left_col_base[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_UpperCAmelCase = a_c
_UpperCAmelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(snake_case_ ) , FadeOut(snake_case_ , run_time=0.5 ) , )
_UpperCAmelCase = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) , MoveToTarget(snake_case_ ) )
self.wait()
| 236
| 1
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
A ={
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
A =AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def snake_case_ (_a : Any , _a : Any=False ):
UpperCAmelCase , UpperCAmelCase = create_model(
'''HTSAT-tiny''' , '''roberta''' , _a , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=_a , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def snake_case_ (_a : int ):
UpperCAmelCase = {}
UpperCAmelCase = R'''.*sequential.(\d+).*'''
UpperCAmelCase = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase = key.replace(_a , _a )
if re.match(_a , _a ):
# replace sequential layers with list
UpperCAmelCase = re.match(_a , _a ).group(1 )
UpperCAmelCase = key.replace(F"sequential.{sequential_layer}." , F"layers.{int(_a )//3}.linear." )
elif re.match(_a , _a ):
UpperCAmelCase = int(re.match(_a , _a ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase = 1 if projecton_layer == 0 else 2
UpperCAmelCase = key.replace(F"_projection.{projecton_layer}." , F"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase = value
UpperCAmelCase = mixed_qkv.size(0 ) // 3
UpperCAmelCase = mixed_qkv[:qkv_dim]
UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase = query_layer
UpperCAmelCase = key_layer
UpperCAmelCase = value_layer
else:
UpperCAmelCase = value
return model_state_dict
def snake_case_ (_a : Optional[int] , _a : Optional[int] , _a : Optional[Any] , _a : Union[str, Any]=False ):
UpperCAmelCase , UpperCAmelCase = init_clap(_a , enable_fusion=_a )
clap_model.eval()
UpperCAmelCase = clap_model.state_dict()
UpperCAmelCase = rename_state_dict(_a )
UpperCAmelCase = ClapConfig()
UpperCAmelCase = enable_fusion
UpperCAmelCase = ClapModel(_a )
# ignore the spectrogram embedding layer
model.load_state_dict(_a , strict=_a )
model.save_pretrained(_a )
transformers_config.save_pretrained(_a )
if __name__ == "__main__":
A =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
A =parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 710
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A =logging.get_logger(__name__)
@add_end_docstrings(__a )
class _a ( __a ):
def __init__( self : Optional[int] , *lowercase : Any , **lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(*lowercase , **lowercase )
self.check_model_type(lowercase )
def A ( self : List[str] , lowercase : str=None , lowercase : List[str]=None , lowercase : List[str]=None , **lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = {}, {}
if padding is not None:
UpperCAmelCase = padding
if truncation is not None:
UpperCAmelCase = truncation
if top_k is not None:
UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[Any] , lowercase : Union["Image.Image", str] , lowercase : str = None , **lowercase : Optional[int] ):
'''simple docstring'''
if isinstance(lowercase , (Image.Image, str) ) and isinstance(lowercase , lowercase ):
UpperCAmelCase = {'''image''': image, '''question''': question}
else:
UpperCAmelCase = image
UpperCAmelCase = super().__call__(lowercase , **lowercase )
return results
def A ( self : List[Any] , lowercase : List[str] , lowercase : Any=False , lowercase : Any=False ):
'''simple docstring'''
UpperCAmelCase = load_image(inputs['''image'''] )
UpperCAmelCase = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=lowercase , truncation=lowercase )
UpperCAmelCase = self.image_processor(images=lowercase , return_tensors=self.framework )
model_inputs.update(lowercase )
return model_inputs
def A ( self : Dict , lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model(**lowercase )
return model_outputs
def A ( self : Union[str, Any] , lowercase : int , lowercase : Optional[Any]=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase = model_outputs.logits.sigmoid()[0]
UpperCAmelCase , UpperCAmelCase = probs.topk(lowercase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase = scores.tolist()
UpperCAmelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )]
| 358
| 0
|
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) == 0:
return False
UpperCAmelCase_ =len(lowercase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , lowercase__ )
else:
return binary_search(a_list[midpoint + 1 :] , lowercase__ )
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
__lowercase : Optional[Any] =[int(item.strip()) for item in user_input.split(""",""")]
__lowercase : List[Any] =int(input("""Enter the number to be found in the list:\n""").strip())
__lowercase : Optional[Any] ="""""" if binary_search(sequence, target) else """not """
print(f"""{target} was {not_str}found in {sequence}""")
| 54
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : int = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowercase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 584
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = ["image_processor", "tokenizer"]
_A = "CLIPImageProcessor"
_A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[Any] , A_ : Dict=None , A_ : Optional[Any]=None , **A_ : str ) -> int:
"""simple docstring"""
lowerCamelCase_: Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A_ , )
lowerCamelCase_: Union[str, Any] = kwargs.pop("""feature_extractor""" )
lowerCamelCase_: str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A_ , A_ )
def __call__( self : Optional[Any] , A_ : int=None , A_ : Tuple=None , A_ : List[Any]=None , **A_ : Any ) -> int:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
lowerCamelCase_: Optional[Any] = self.tokenizer(A_ , return_tensors=A_ , **A_ )
if images is not None:
lowerCamelCase_: List[Any] = self.image_processor(A_ , return_tensors=A_ , **A_ )
if text is not None and images is not None:
lowerCamelCase_: Dict = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ) , tensor_type=A_ )
def lowerCAmelCase ( self : str , *A_ : Dict , **A_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*A_ , **A_ )
def lowerCAmelCase ( self : Tuple , *A_ : str , **A_ : int ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*A_ , **A_ )
@property
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_: List[Any] = self.tokenizer.model_input_names
lowerCamelCase_: Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A_ , )
return self.image_processor_class
@property
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , A_ , )
return self.image_processor
| 584
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A_ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Any ):
__a = tempfile.mkdtemp()
__a = BlipImageProcessor()
__a = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
__a = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
__a = InstructBlipProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self : Tuple , **__SCREAMING_SNAKE_CASE : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).tokenizer
def _UpperCAmelCase ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).image_processor
def _UpperCAmelCase ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).qformer_tokenizer
def _UpperCAmelCase ( self : Any ):
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self : str ):
__a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__a = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self : Dict ):
__a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__a = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
__a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor.qformer_tokenizer , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[Any] ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = self.get_qformer_tokenizer()
__a = InstructBlipProcessor(
tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , qformer_tokenizer=__SCREAMING_SNAKE_CASE )
__a = self.prepare_image_inputs()
__a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="np" )
__a = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCAmelCase ( self : Dict ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = self.get_qformer_tokenizer()
__a = InstructBlipProcessor(
tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , qformer_tokenizer=__SCREAMING_SNAKE_CASE )
__a = "lower newer"
__a = processor(text=__SCREAMING_SNAKE_CASE )
__a = tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
__a = qformer_tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def _UpperCAmelCase ( self : List[Any] ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = self.get_qformer_tokenizer()
__a = InstructBlipProcessor(
tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , qformer_tokenizer=__SCREAMING_SNAKE_CASE )
__a = "lower newer"
__a = self.prepare_image_inputs()
__a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def _UpperCAmelCase ( self : Optional[int] ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = self.get_qformer_tokenizer()
__a = InstructBlipProcessor(
tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , qformer_tokenizer=__SCREAMING_SNAKE_CASE )
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(__SCREAMING_SNAKE_CASE )
__a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[int] ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = self.get_qformer_tokenizer()
__a = InstructBlipProcessor(
tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , qformer_tokenizer=__SCREAMING_SNAKE_CASE )
__a = "lower newer"
__a = self.prepare_image_inputs()
__a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 197
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __A ( ):
"""simple docstring"""
__a = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_A )
__a = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_A )
env_command_parser(subparsers=_A )
launch_command_parser(subparsers=_A )
tpu_command_parser(subparsers=_A )
test_command_parser(subparsers=_A )
# Let's go
__a = parser.parse_args()
if not hasattr(_A , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_A )
if __name__ == "__main__":
main()
| 197
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = ["input_features", "is_longer"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: int=64 ,lowerCamelCase_: int=48000 ,lowerCamelCase_: int=480 ,lowerCamelCase_: List[Any]=10 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=0.0 ,lowerCamelCase_: Dict=False ,lowerCamelCase_: float = 0 ,lowerCamelCase_: float = 14000 ,lowerCamelCase_: int = None ,lowerCamelCase_: str = "fusion" ,lowerCamelCase_: str = "repeatpad" ,**lowerCamelCase_: str ,) -> int:
super().__init__(
feature_size=lowerCamelCase_ ,sampling_rate=lowerCamelCase_ ,padding_value=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase_ : List[str] = top_db
UpperCAmelCase_ : List[str] = truncation
UpperCAmelCase_ : Optional[Any] = padding
UpperCAmelCase_ : Tuple = fft_window_size
UpperCAmelCase_ : int = (fft_window_size >> 1) + 1
UpperCAmelCase_ : List[str] = hop_length
UpperCAmelCase_ : Any = max_length_s
UpperCAmelCase_ : List[Any] = max_length_s * sampling_rate
UpperCAmelCase_ : Tuple = sampling_rate
UpperCAmelCase_ : Optional[Any] = frequency_min
UpperCAmelCase_ : Any = frequency_max
UpperCAmelCase_ : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCamelCase_ ,min_frequency=lowerCamelCase_ ,max_frequency=lowerCamelCase_ ,sampling_rate=lowerCamelCase_ ,norm=lowerCamelCase_ ,mel_scale="""htk""" ,)
UpperCAmelCase_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCamelCase_ ,min_frequency=lowerCamelCase_ ,max_frequency=lowerCamelCase_ ,sampling_rate=lowerCamelCase_ ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def A__ ( self: Optional[int] ) -> Dict[str, Any]:
UpperCAmelCase_ : Optional[int] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def A__ ( self: List[str] ,lowerCamelCase_: np.array ,lowerCamelCase_: Optional[np.array] = None ) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = spectrogram(
lowerCamelCase_ ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCamelCase_ ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCAmelCase_ : Optional[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
UpperCAmelCase_ : Any = np.random.choice(ranges[0] )
UpperCAmelCase_ : str = np.random.choice(ranges[1] )
UpperCAmelCase_ : Dict = np.random.choice(ranges[2] )
UpperCAmelCase_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
UpperCAmelCase_ : int = mel[idx_middle : idx_middle + chunk_frames, :]
UpperCAmelCase_ : Any = mel[idx_back : idx_back + chunk_frames, :]
UpperCAmelCase_ : Union[str, Any] = torch.tensor(mel[None, None, :] )
UpperCAmelCase_ : str = torch.nn.functional.interpolate(
lowerCamelCase_ ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
UpperCAmelCase_ : List[str] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def A__ ( self: List[Any] ,lowerCamelCase_: np.array ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
UpperCAmelCase_ : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
UpperCAmelCase_ : Tuple = len(lowerCamelCase_ ) - max_length
UpperCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
UpperCAmelCase_ : str = waveform[idx : idx + max_length]
UpperCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCamelCase_ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
UpperCAmelCase_ : Optional[int] = self._np_extract_fbank_features(lowerCamelCase_ ,self.mel_filters )
UpperCAmelCase_ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
UpperCAmelCase_ : int = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
UpperCAmelCase_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
UpperCAmelCase_ : Union[str, Any] = False
else:
UpperCAmelCase_ : str = self._random_mel_fusion(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
UpperCAmelCase_ : List[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
UpperCAmelCase_ : List[Any] = int(max_length / len(lowerCamelCase_ ) )
UpperCAmelCase_ : str = np.stack(np.tile(lowerCamelCase_ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
UpperCAmelCase_ : List[Any] = int(max_length / len(lowerCamelCase_ ) )
UpperCAmelCase_ : List[str] = np.stack(np.tile(lowerCamelCase_ ,lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.pad(lowerCamelCase_ ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
UpperCAmelCase_ : Optional[Any] = self._np_extract_fbank_features(lowerCamelCase_ ,self.mel_filters )
UpperCAmelCase_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
UpperCAmelCase_ : List[Any] = self._np_extract_fbank_features(lowerCamelCase_ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self: List[str] ,lowerCamelCase_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCamelCase_: str = None ,lowerCamelCase_: Optional[str] = None ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[Union[str, TensorType]] = None ,**lowerCamelCase_: str ,) -> BatchFeature:
UpperCAmelCase_ : str = truncation if truncation is not None else self.truncation
UpperCAmelCase_ : Tuple = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase_ : Optional[Any] = isinstance(lowerCamelCase_ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase_ : Tuple = is_batched_numpy or (
isinstance(lowerCamelCase_ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase_ : Dict = [np.asarray(lowerCamelCase_ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ ,np.ndarray ):
UpperCAmelCase_ : Tuple = np.asarray(lowerCamelCase_ ,dtype=np.floataa )
elif isinstance(lowerCamelCase_ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase_ : List[str] = [np.asarray(lowerCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
UpperCAmelCase_ : Any = [
self._get_input_mel(lowerCamelCase_ ,max_length if max_length else self.nb_max_samples ,lowerCamelCase_ ,lowerCamelCase_ )
for waveform in raw_speech
]
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase_ )
is_longer.append(lowerCamelCase_ )
if truncation == "fusion" and sum(lowerCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
UpperCAmelCase_ : Union[str, Any] = np.random.randint(0 ,len(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = True
if isinstance(input_mel[0] ,lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = [np.asarray(lowerCamelCase_ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
UpperCAmelCase_ : Dict = [[longer] for longer in is_longer]
UpperCAmelCase_ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
UpperCAmelCase_ : Optional[int] = BatchFeature(lowerCamelCase_ )
if return_tensors is not None:
UpperCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCamelCase_ )
return input_features
| 706
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 322
| 0
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
snake_case = logging.getLogger()
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowercase , "all_results.json" )
if os.path.exists(lowercase ):
with open(lowercase , "r" ) as f:
SCREAMING_SNAKE_CASE : List[Any] = json.load(lowercase )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
@classmethod
def _A ( cls : List[str] ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE : Optional[int] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _A ( cls : int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : str = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : List[str] = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : str = get_results(UpperCAmelCase_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Optional[Any] = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : str = get_results(UpperCAmelCase_ )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCAmelCase_ )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : Any ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
SCREAMING_SNAKE_CASE : str = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Optional[int] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : Optional[Any] = get_results(UpperCAmelCase_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Optional[Any] = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCAmelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : List[str] = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : Tuple = get_results(UpperCAmelCase_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : Any = get_results(UpperCAmelCase_ )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Optional[Any] = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : Any = get_results(UpperCAmelCase_ )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "translation_no_trainer" ) ) )
@slow
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Optional[int] = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCAmelCase_ )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCAmelCase_ )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , "image_classification_no_trainer" ) ) )
| 62
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 62
| 1
|
"""simple docstring"""
def lowercase__(A ) ->bool:
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 715
|
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase__(A ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__() ->Iterator[int]:
"""simple docstring"""
lowercase__ : Union[str, Any]= 2
while True:
if is_prime(A ):
yield num
num += 1
def lowercase__(A = 2_000_000 ) ->int:
"""simple docstring"""
return sum(takewhile(lambda A : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85
| 0
|
def _UpperCAmelCase (UpperCamelCase_ : int = 100 ):
'''simple docstring'''
_lowerCAmelCase : Any = (n * (n + 1) // 2) ** 2
_lowerCAmelCase : Tuple = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 429
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_a )
class __snake_case (_a ):
lowerCAmelCase__ = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCAmelCase__ = Features({"audio": Audio()} )
lowerCAmelCase__ = Features({"labels": ClassLabel} )
lowerCAmelCase__ = "audio"
lowerCAmelCase__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , _UpperCAmelCase ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
_lowerCAmelCase : Any = copy.deepcopy(self )
_lowerCAmelCase : Tuple = self.label_schema.copy()
_lowerCAmelCase : List[Any] = features[self.label_column]
_lowerCAmelCase : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 429
| 1
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCamelCase_ = pytest.mark.integration
@require_faiss
class __SCREAMING_SNAKE_CASE ( __lowerCamelCase ):
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Optional[int] =Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(a_ ) for x in np.arange(30 ).tolist()]} )
return dset
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
import faiss
lowercase : Dataset =self._create_dummy_dataset()
lowercase : Dict =dset.map(
lambda UpperCAmelCase__ , UpperCAmelCase__ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=a_ , keep_in_memory=a_ )
lowercase : Optional[int] =dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase : str =dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
import faiss
lowercase : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowercase : Optional[Any] =dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
import faiss
lowercase : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
lowercase : Optional[Any] =dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(a_ , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
from elasticsearch import Elasticsearch
lowercase : Dataset =self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
lowercase : Optional[Any] ={"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowercase : Optional[int] ={"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowercase : Dict =Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=a_ )
lowercase : str =dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class __SCREAMING_SNAKE_CASE ( __lowerCamelCase ):
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
import faiss
lowercase : Any =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowercase : Optional[int] =np.zeros(5 , dtype=np.floataa )
lowercase : Tuple =1
lowercase : Tuple =index.search(a_ )
self.assertRaises(a_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowercase : int =np.eye(5 , dtype=np.floataa )[::-1]
lowercase : Any =index.search_batch(a_ )
self.assertRaises(a_ , index.search_batch , queries[0] )
lowercase : int =[scores[0] for scores in total_scores]
lowercase : Any =[indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , a_ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
import faiss
lowercase : Tuple =FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowercase : str =FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(a_ ):
lowercase : int =FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
import faiss
lowercase : Union[str, Any] =faiss.IndexFlat(5 )
lowercase : Optional[int] =FaissIndex(custom_index=a_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
import faiss
lowercase : Union[str, Any] =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file:
index.save(tmp_file.name )
lowercase : str =FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowercase : Tuple =np.zeros(5 , dtype=np.floataa )
lowercase : Dict =1
lowercase : Tuple =index.search(a_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _lowerCAmelCase ( __magic_name__ : int ) -> Any:
import faiss
lowercase : Any =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowercase : Optional[Any] ="index.faiss"
lowercase : int =f'''mock://{index_name}'''
index.save(lowerCAmelCase_ , storage_options=mockfs.storage_options )
lowercase : str =FaissIndex.load(lowerCAmelCase_ , storage_options=mockfs.storage_options )
lowercase : Any =np.zeros(5 , dtype=np.floataa )
lowercase : Optional[int] =1
lowercase : Union[str, Any] =index.search(lowerCAmelCase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __SCREAMING_SNAKE_CASE ( __lowerCamelCase ):
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
lowercase : Optional[Any] =Elasticsearch()
lowercase : Tuple ={"acknowledged": True}
lowercase : List[str] =ElasticSearchIndex(es_client=a_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
lowercase : List[str] ="foo"
lowercase : Tuple ={"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowercase : Union[str, Any] =index.search(a_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowercase : List[Any] ="foo"
lowercase : List[str] ={"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowercase : Optional[int] =index.search(a_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowercase : Union[str, Any] =["foo", "bar", "foobar"]
lowercase : Any ={"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowercase : str =index.search_batch(a_ )
lowercase : Optional[int] =[scores[0] for scores in total_scores]
lowercase : Tuple =[indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([1, 1, 1] , a_ )
# batched queries with timeout
lowercase : int =["foo", "bar", "foobar"]
lowercase : int ={"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowercase : Union[str, Any] =index.search_batch(a_ , request_timeout=30 )
lowercase : Tuple =[scores[0] for scores in total_scores]
lowercase : List[Any] =[indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([1, 1, 1] , a_ )
| 707
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
| 88
| 0
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_snake_case = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_snake_case = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_snake_case = max(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_SCREAMING_SNAKE_CASE ) , b_binary.zfill(_SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 585
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowercase_ ( a_ ):
def __init__( self : int , _lowercase : List[str]=None , **_lowercase : List[str] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead." , _lowercase , )
super().__init__(args=_lowercase , **_lowercase )
| 308
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
_UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase:
"""simple docstring"""
lowerCamelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
lowerCamelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
lowerCamelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
lowerCamelCase__ = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowerCamelCase__ = field(
default=__lowercase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
lowerCamelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCamelCase__ = field(
default=__lowercase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class _lowerCAmelCase:
"""simple docstring"""
lowerCamelCase__ = field(default=__lowercase , metadata={'''help''': '''The input training data file (a text file).'''})
lowerCamelCase__ = field(
default=__lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
lowerCamelCase__ = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
lowerCamelCase__ = field(
default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
lowerCamelCase__ = field(
default=__lowercase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase__ = field(
default=__lowercase , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
lowerCamelCase__ = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCamelCase__ = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def SCREAMING_SNAKE_CASE__ ( self )-> str:
if self.train_file is not None:
__A = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__A = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _lowerCAmelCase:
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = True
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self , UpperCAmelCase )-> Tuple:
__A = '''label''' if '''label''' in features[0].keys() else '''labels'''
__A = [feature.pop(_A ) for feature in features]
__A = len(_A )
__A = len(features[0]['''input_ids'''] )
__A = [
[{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features
]
__A = list(chain(*_A ) )
__A = self.tokenizer.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
__A = {k: v.view(_A , _A , -1 ) for k, v in batch.items()}
# Add back labels
__A = torch.tensor(_A , dtype=torch.intaa )
return batch
def __UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
__A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__A , __A , __A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__A , __A , __A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__A = training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__A = {}
if data_args.train_file is not None:
__A = data_args.train_file
if data_args.validation_file is not None:
__A = data_args.validation_file
__A = data_args.train_file.split('''.''' )[-1]
__A = load_dataset(
__snake_case , data_files=__snake_case , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__A = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__A = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__A = [F"ending{i}" for i in range(4 )]
__A = '''sent1'''
__A = '''sent2'''
if data_args.max_seq_length is None:
__A = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
__A = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
__A = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(snake_case ):
__A = [[context] * 4 for context in examples[context_name]]
__A = examples[question_header_name]
__A = [
[F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(__snake_case )
]
# Flatten out
__A = list(chain(*__snake_case ) )
__A = list(chain(*__snake_case ) )
# Tokenize
__A = tokenizer(
__snake_case , __snake_case , truncation=__snake_case , max_length=__snake_case , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__snake_case ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
__A = raw_datasets['''train''']
if data_args.max_train_samples is not None:
__A = min(len(__snake_case ) , data_args.max_train_samples )
__A = train_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
__A = train_dataset.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
__A = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
__A = min(len(__snake_case ) , data_args.max_eval_samples )
__A = eval_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
__A = eval_dataset.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__A = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__snake_case , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(snake_case ):
__A , __A = eval_predictions
__A = np.argmax(__snake_case , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__A = Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , compute_metrics=__snake_case , )
# Training
if training_args.do_train:
__A = None
if training_args.resume_from_checkpoint is not None:
__A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__A = last_checkpoint
__A = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
__A = train_result.metrics
__A = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
__A = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__A = trainer.evaluate()
__A = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
__A = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
__A = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def __UpperCamelCase ( snake_case ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 705
|
_UpperCamelCase : Optional[int] = 8.31_44_62 # Unit - J mol-1 K-1
def __UpperCamelCase ( snake_case , snake_case , snake_case ) -> float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __UpperCamelCase ( snake_case , snake_case , snake_case ) -> float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 341
| 0
|
def _lowerCamelCase ( snake_case , snake_case ):
assert x is not None
assert y is not None
_lowerCAmelCase = len(snake_case )
_lowerCAmelCase = len(snake_case )
# declaring the array for storing the dp values
_lowerCAmelCase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
_lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
_lowerCAmelCase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_lowerCAmelCase = """"""
_lowerCAmelCase = m, n
while i > 0 and j > 0:
_lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_lowerCAmelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_lowercase: Optional[Any] = """AGGTAB"""
_lowercase: Optional[Any] = """GXTXAYB"""
_lowercase: List[Any] = 4
_lowercase: int = """GTAB"""
_lowercase: int = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
| 192
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = x
UpperCamelCase : str = y
for step in range(SCREAMING_SNAKE_CASE ): # noqa: B007
UpperCamelCase : str = a * a - b * b + x
UpperCamelCase : Any = 2 * a * b + y
UpperCamelCase : List[str] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(SCREAMING_SNAKE_CASE , 1 , 1 ) )
def UpperCamelCase (SCREAMING_SNAKE_CASE = 800 , SCREAMING_SNAKE_CASE = 600 , SCREAMING_SNAKE_CASE = -0.6 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 3.2 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = True , ):
UpperCamelCase : Tuple = Image.new("""RGB""" , (image_width, image_height) )
UpperCamelCase : Dict = img.load()
# loop through the image-coordinates
for image_x in range(SCREAMING_SNAKE_CASE ):
for image_y in range(SCREAMING_SNAKE_CASE ):
# determine the figure-coordinates based on the image-coordinates
UpperCamelCase : Dict = figure_width / image_width * image_height
UpperCamelCase : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCamelCase : Dict = figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCamelCase : Union[str, Any] = get_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCamelCase : int = get_color_coded_rgb(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[int] = get_black_and_white_rgb(SCREAMING_SNAKE_CASE )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__magic_name__ : int = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 102
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__: Dict = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
SCREAMING_SNAKE_CASE_ : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> int:
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_ : Any = ''
else:
SCREAMING_SNAKE_CASE_ : str = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ : int = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_ : str = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_ : List[str] = in_proj_bias[-config.hidden_size :]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : Tuple = dct.pop(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = val
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Dict = DeiTConfig()
# all deit models have fine-tuned heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE_ : Tuple = 1000
SCREAMING_SNAKE_CASE_ : Tuple = 'huggingface/label-files'
SCREAMING_SNAKE_CASE_ : List[Any] = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ : List[str] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE_ : Dict = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : List[Any] = idalabel
SCREAMING_SNAKE_CASE_ : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : int = int(deit_name[-6:-4] )
SCREAMING_SNAKE_CASE_ : int = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 192
SCREAMING_SNAKE_CASE_ : int = 768
SCREAMING_SNAKE_CASE_ : List[Any] = 12
SCREAMING_SNAKE_CASE_ : List[Any] = 3
elif deit_name[9:].startswith('small' ):
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Tuple = 1536
SCREAMING_SNAKE_CASE_ : List[Any] = 12
SCREAMING_SNAKE_CASE_ : Optional[Any] = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1024
SCREAMING_SNAKE_CASE_ : Any = 4096
SCREAMING_SNAKE_CASE_ : Tuple = 24
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 16
# load original model from timm
SCREAMING_SNAKE_CASE_ : Dict = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE_ : Tuple = timm_model.state_dict()
SCREAMING_SNAKE_CASE_ : Optional[int] = create_rename_keys(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# load HuggingFace model
SCREAMING_SNAKE_CASE_ : Dict = DeiTForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
SCREAMING_SNAKE_CASE_ : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
SCREAMING_SNAKE_CASE_ : Optional[Any] = DeiTImageProcessor(size=SCREAMING_SNAKE_CASE , crop_size=config.image_size )
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=prepare_img() , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = encoding['pixel_values']
SCREAMING_SNAKE_CASE_ : Optional[int] = model(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = timm_model(SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE , outputs.logits , atol=1e-3 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__: int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase__: Tuple = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 713
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowerCAmelCase__: List[Any] = logging.get_logger(__name__)
class snake_case_ :
__lowerCamelCase : Any = None
@experimental
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return _map_with_joblib(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_proc if num_proc <= len(SCREAMING_SNAKE_CASE ) else len(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = [] # We organize the splits ourselve (contiguous splits)
for index in range(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : List[str] = len(SCREAMING_SNAKE_CASE ) // num_proc
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(SCREAMING_SNAKE_CASE ) % num_proc
SCREAMING_SNAKE_CASE_ : List[Any] = div * index + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(SCREAMING_SNAKE_CASE ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f'Error dividing inputs iterable among processes. '
f'Total number of objects {len(SCREAMING_SNAKE_CASE )}, '
f'length: {sum(len(i[1] ) for i in split_kwds )}' )
logger.info(
f'Spawning {num_proc} processes for {len(SCREAMING_SNAKE_CASE )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = None, None
if not disable_tqdm:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = (RLock(),), tqdm.set_lock
with Pool(SCREAMING_SNAKE_CASE , initargs=SCREAMING_SNAKE_CASE , initializer=SCREAMING_SNAKE_CASE ) as pool:
SCREAMING_SNAKE_CASE_ : Optional[int] = pool.map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(f'Finished {num_proc} processes' )
SCREAMING_SNAKE_CASE_ : List[str] = [obj for proc_res in mapped for obj in proc_res]
logger.info(f'Unpacked {len(SCREAMING_SNAKE_CASE )} objects' )
return mapped
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=SCREAMING_SNAKE_CASE ):
return joblib.Parallel()(
joblib.delayed(SCREAMING_SNAKE_CASE )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Tuple:
SCREAMING_SNAKE_CASE_ : str = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
SCREAMING_SNAKE_CASE_ : Dict = None
| 311
| 0
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _A( lowerCAmelCase ):
A__ : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(UpperCamelCase_ , max_perimeter + 1 ):
A__ : Tuple = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(UpperCamelCase_ ):
A__ : List[str] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _A( lowerCAmelCase = 1000 ):
A__ : List[str] = pythagorean_triple(UpperCamelCase_ )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions')
| 363
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase ( _snake_case ):
UpperCAmelCase = ["image_processor", "tokenizer"]
UpperCAmelCase = "OwlViTImageProcessor"
UpperCAmelCase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : str ):
UpperCAmelCase__ :int = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCamelCase , )
UpperCAmelCase__ :Union[str, Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase__ :List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : int , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : str="max_length" , __lowerCamelCase : Any="np" , **__lowerCamelCase : Tuple ):
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase ) or (isinstance(__lowerCamelCase , __lowerCamelCase ) and not isinstance(text[0] , __lowerCamelCase )):
UpperCAmelCase__ :Any = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )]
elif isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(text[0] , __lowerCamelCase ):
UpperCAmelCase__ :Tuple = []
# Maximum number of queries across batch
UpperCAmelCase__ :List[str] = max([len(__lowerCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase ) != max_num_queries:
UpperCAmelCase__ :str = t + [''' '''] * (max_num_queries - len(__lowerCamelCase ))
UpperCAmelCase__ :Tuple = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
encodings.append(__lowerCamelCase )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
UpperCAmelCase__ :List[Any] = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
UpperCAmelCase__ :Any = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase__ :List[Any] = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
UpperCAmelCase__ :Union[str, Any] = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase__ :Any = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
UpperCAmelCase__ :List[str] = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase__ :Optional[Any] = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
UpperCAmelCase__ :int = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
UpperCAmelCase__ :List[Any] = BatchEncoding()
UpperCAmelCase__ :Union[str, Any] = input_ids
UpperCAmelCase__ :Dict = attention_mask
if query_images is not None:
UpperCAmelCase__ :Tuple = BatchEncoding()
UpperCAmelCase__ :int = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ).pixel_values
UpperCAmelCase__ :Optional[int] = query_pixel_values
if images is not None:
UpperCAmelCase__ :str = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is not None and images is not None:
UpperCAmelCase__ :Dict = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase__ :Tuple = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( self : int , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : int ):
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( self : Dict , *__lowerCamelCase : Any , **__lowerCamelCase : Tuple ):
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : str ):
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( self : int , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ):
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ):
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCamelCase , )
return self.image_processor_class
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCamelCase , )
return self.image_processor
| 467
| 0
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
UpperCamelCase__ = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
UpperCamelCase__ = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
UpperCamelCase__ = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
UpperCamelCase__ = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
UpperCamelCase__ = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
UpperCamelCase__ = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
UpperCamelCase__ = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def _a ( ):
__lowerCAmelCase , __lowerCAmelCase = randrange(len(SCREAMING_SNAKE_CASE_ ) ), randrange(len(SCREAMING_SNAKE_CASE_ ) )
__lowerCAmelCase = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
__lowerCAmelCase , __lowerCAmelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _a ( SCREAMING_SNAKE_CASE_ : int = 1_00 ):
return (generate_random_hand() for _ in range(SCREAMING_SNAKE_CASE_ ))
@pytest.mark.parametrize("hand, expected" , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
assert PokerHand(SCREAMING_SNAKE_CASE_ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
assert PokerHand(SCREAMING_SNAKE_CASE_ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ):
__lowerCAmelCase = PokerHand(SCREAMING_SNAKE_CASE_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str ):
assert PokerHand(SCREAMING_SNAKE_CASE_ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ):
assert PokerHand(SCREAMING_SNAKE_CASE_ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
assert PokerHand(SCREAMING_SNAKE_CASE_ ).compare_with(PokerHand(SCREAMING_SNAKE_CASE_ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ):
assert PokerHand(SCREAMING_SNAKE_CASE_ ).compare_with(PokerHand(SCREAMING_SNAKE_CASE_ ) ) == expected
def _a ( ):
__lowerCAmelCase = [PokerHand(SCREAMING_SNAKE_CASE_ ) for hand in SORTED_HANDS]
__lowerCAmelCase = poker_hands.copy()
shuffle(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = chain(sorted(SCREAMING_SNAKE_CASE_ ) )
for index, hand in enumerate(SCREAMING_SNAKE_CASE_ ):
assert hand == poker_hands[index]
def _a ( ):
# Test that five high straights are compared correctly.
__lowerCAmelCase = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=SCREAMING_SNAKE_CASE_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _a ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
__lowerCAmelCase = PokerHand("2C 4S AS 3D 5C" )
__lowerCAmelCase = True
__lowerCAmelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _a ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
__lowerCAmelCase = 0
__lowerCAmelCase = os.path.abspath(os.path.dirname(SCREAMING_SNAKE_CASE_ ) )
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , "poker_hands.txt" )
with open(SCREAMING_SNAKE_CASE_ ) as file_hand:
for line in file_hand:
__lowerCAmelCase = line[:14].strip()
__lowerCAmelCase = line[15:].strip()
__lowerCAmelCase , __lowerCAmelCase = PokerHand(SCREAMING_SNAKE_CASE_ ), PokerHand(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = player.compare_with(SCREAMING_SNAKE_CASE_ )
if output == "Win":
answer += 1
assert answer == 3_76
| 552
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class a__ ( snake_case__ ):
def __init__( self , _A = "▁" , _A = True , _A = "<unk>" , _A = "</s>" , _A = "<pad>" , ):
"""simple docstring"""
__lowerCAmelCase = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
__lowerCAmelCase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowerCAmelCase = token_dict["token"]
__lowerCAmelCase = Tokenizer(Unigram() )
__lowerCAmelCase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
__lowerCAmelCase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_A , add_prefix_space=_A ),
pre_tokenizers.Digits(individual_digits=_A ),
pre_tokenizers.Punctuation(),
] )
__lowerCAmelCase = decoders.Metaspace(replacement=_A , add_prefix_space=_A )
__lowerCAmelCase = TemplateProcessing(
single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
__lowerCAmelCase = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(_A , _A )
def __SCREAMING_SNAKE_CASE( self , _A , _A = 8_0_0_0 , _A = True , ):
"""simple docstring"""
__lowerCAmelCase = trainers.UnigramTrainer(
vocab_size=_A , special_tokens=self.special_tokens_list , show_progress=_A , )
if isinstance(_A , _A ):
__lowerCAmelCase = [files]
self._tokenizer.train(_A , trainer=_A )
self.add_unk_id()
def __SCREAMING_SNAKE_CASE( self , _A , _A = 8_0_0_0 , _A = True , ):
"""simple docstring"""
__lowerCAmelCase = trainers.UnigramTrainer(
vocab_size=_A , special_tokens=self.special_tokens_list , show_progress=_A , )
self._tokenizer.train_from_iterator(_A , trainer=_A )
self.add_unk_id()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = json.loads(self._tokenizer.to_str() )
__lowerCAmelCase = self.special_tokens["unk"]["id"]
__lowerCAmelCase = Tokenizer.from_str(json.dumps(_A ) )
| 552
| 1
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowerCAmelCase__ = 0
if start < end:
lowerCAmelCase__ = randint(lowercase_ , lowercase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = _in_place_partition(lowercase_ , lowercase_ , lowercase_ )
count += _in_place_quick_sort(lowercase_ , lowercase_ , p - 1 )
count += _in_place_quick_sort(lowercase_ , p + 1 , lowercase_ )
return count
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(lowercase_ , lowercase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(lowercase_ , lowercase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
__magic_name__ : Optional[int] = TemporaryFile()
__magic_name__ : List[Any] = 1_00 # 1000 elements are to be sorted
__magic_name__ : List[str] = 0, 1 # mean and standard deviation
__magic_name__ : List[str] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
__magic_name__ : Union[str, Any] = np.load(outfile)
__magic_name__ : Optional[int] = len(M) - 1
__magic_name__ : int = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 615
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class _a (unittest.TestCase , __magic_name__ ):
'''simple docstring'''
def __A ( self ):
A__ : List[str] = load_tool("""text-to-speech""" )
self.tool.setup()
def __A ( self ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
A__ : Tuple = self.tool("""hey""" )
A__ : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def __A ( self ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
A__ : Any = self.tool("""hey""" )
A__ : Optional[Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 456
| 0
|
"""simple docstring"""
import pprint
import requests
SCREAMING_SNAKE_CASE_ = 'https://zenquotes.io/api'
def lowercase ():
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def lowercase ():
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = random_quotes()
pprint.pprint(response)
| 712
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def A__ ( self , snake_case_ ) -> Optional[int]:
with open(snake_case_ , encoding="""utf-8""" ) as input_file:
__lowerCAmelCase = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
__lowerCAmelCase = input_file.read()
__lowerCAmelCase = regexp.search(snake_case_ )
return match
def A__ ( self , snake_case_ ) -> Union[str, Any]:
with open(snake_case_ , encoding="""utf-8""" ) as input_file:
__lowerCAmelCase = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
__lowerCAmelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__lowerCAmelCase = regexp.finditer(snake_case_ )
__lowerCAmelCase = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = Path("""./datasets""" )
__lowerCAmelCase = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case_ ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def A__ ( self ) -> Tuple:
__lowerCAmelCase = Path("""./datasets""" )
__lowerCAmelCase = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(snake_case_ ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 573
| 0
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__magic_name__ : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ (_a ):
def __init__( self : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : Any ):
"""simple docstring"""
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 615
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def a_ ( __lowerCAmelCase ):
lowerCAmelCase__ = model.config
lowerCAmelCase__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
lowerCAmelCase__ = MBartConfig(
is_decoder=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , add_cross_attention=__lowerCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__lowerCAmelCase , add_final_layer_norm=__lowerCAmelCase , )
return encoder_config, decoder_config
def a_ ( __lowerCAmelCase ):
if "encoder.model" in name:
lowerCAmelCase__ = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
lowerCAmelCase__ = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
lowerCAmelCase__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCAmelCase__ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
lowerCAmelCase__ = '''encoder.''' + name
if "attn.proj" in name:
lowerCAmelCase__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
lowerCAmelCase__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCAmelCase__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowerCAmelCase__ = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
lowerCAmelCase__ = '''encoder.layernorm.bias'''
return name
def a_ ( __lowerCAmelCase , __lowerCAmelCase ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(__lowerCAmelCase )
if "qkv" in key:
lowerCAmelCase__ = key.split('''.''' )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = int(key_split[5] )
lowerCAmelCase__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowerCAmelCase__ = val
return orig_state_dict
def a_ ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False ):
# load original model
lowerCAmelCase__ = DonutModel.from_pretrained(__lowerCAmelCase ).eval()
# load HuggingFace model
lowerCAmelCase__ , lowerCAmelCase__ = get_configs(__lowerCAmelCase )
lowerCAmelCase__ = DonutSwinModel(__lowerCAmelCase )
lowerCAmelCase__ = MBartForCausalLM(__lowerCAmelCase )
lowerCAmelCase__ = VisionEncoderDecoderModel(encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
model.eval()
lowerCAmelCase__ = original_model.state_dict()
lowerCAmelCase__ = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
# verify results on scanned document
lowerCAmelCase__ = load_dataset('''hf-internal-testing/example-documents''' )
lowerCAmelCase__ = dataset['''test'''][0]['''image'''].convert('''RGB''' )
lowerCAmelCase__ = XLMRobertaTokenizerFast.from_pretrained(__lowerCAmelCase , from_slow=__lowerCAmelCase )
lowerCAmelCase__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowerCAmelCase__ = DonutProcessor(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase__ = processor(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowerCAmelCase__ = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
lowerCAmelCase__ = '''When is the coffee break?'''
lowerCAmelCase__ = task_prompt.replace('''{user_input}''' , __lowerCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowerCAmelCase__ = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowerCAmelCase__ = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowerCAmelCase__ = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowerCAmelCase__ = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowerCAmelCase__ = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
lowerCAmelCase__ = original_model.decoder.tokenizer(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
lowerCAmelCase__ = original_model.encoder.model.patch_embed(__lowerCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = model.encoder.embeddings(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
# verify encoder hidden states
lowerCAmelCase__ = original_model.encoder(__lowerCAmelCase )
lowerCAmelCase__ = model.encoder(__lowerCAmelCase ).last_hidden_state
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-2 )
# verify decoder hidden states
lowerCAmelCase__ = original_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).logits
lowerCAmelCase__ = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
__magic_name__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
__magic_name__ : Any = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 615
| 1
|
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class a__ ( datasets.BuilderConfig ):
'''simple docstring'''
lowercase__ : Optional[datasets.Features] = None
lowercase__ : str = "utf-8"
lowercase__ : Optional[str] = None
lowercase__ : Optional[str] = None
lowercase__ : bool = True # deprecated
lowercase__ : Optional[int] = None # deprecated
lowercase__ : int = 1_0 << 2_0 # 10MB
lowercase__ : Optional[bool] = None
class a__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowercase__ : Union[str, Any] = JsonConfig
def __SCREAMING_SNAKE_CASE ( self ) -> int:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
lowerCAmelCase__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCAmelCase__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase_ , (str, list, tuple) ):
lowerCAmelCase__ = data_files
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = [files]
lowerCAmelCase__ = [dl_manager.iter_files(lowerCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowerCAmelCase__ = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = [files]
lowerCAmelCase__ = [dl_manager.iter_files(lowerCamelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCamelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowerCAmelCase__ = self.config.features.arrow_schema.field(lowerCamelCase_ ).type
lowerCAmelCase__ = pa_table.append_column(lowerCamelCase_ , pa.array([None] * len(lowerCamelCase_ ) , type=lowerCamelCase_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase__ = table_cast(lowerCamelCase_ , self.config.features.arrow_schema )
return pa_table
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowerCamelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase__ = json.load(lowerCamelCase_ )
# We keep only the field we are interested in
lowerCAmelCase__ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(lowerCamelCase_ , (list, tuple) ):
lowerCAmelCase__ = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase__ = {col: [row.get(lowerCamelCase_ ) for row in dataset] for col in keys}
else:
lowerCAmelCase__ = dataset
lowerCAmelCase__ = pa.Table.from_pydict(lowerCamelCase_ )
yield file_idx, self._cast_table(lowerCamelCase_ )
# If the file has one json object per line
else:
with open(lowerCamelCase_ , '''rb''' ) as f:
lowerCAmelCase__ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowerCAmelCase__ = max(self.config.chunksize // 32 , 16 << 10 )
lowerCAmelCase__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
lowerCAmelCase__ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowerCamelCase_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowerCAmelCase__ = batch.decode(self.config.encoding , errors=lowerCamelCase_ ).encode('''utf-8''' )
try:
while True:
try:
lowerCAmelCase__ = paj.read_json(
io.BytesIO(lowerCamelCase_ ) , read_options=paj.ReadOptions(block_size=lowerCamelCase_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowerCamelCase_ , pa.ArrowInvalid )
and "straddling" not in str(lowerCamelCase_ )
or block_size > len(lowerCamelCase_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(lowerCamelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowerCamelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase__ = json.load(lowerCamelCase_ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(lowerCamelCase_ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowerCamelCase_ , lowerCamelCase_ ): # list is the only sequence type supported in JSON
try:
lowerCAmelCase__ = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase__ = {col: [row.get(lowerCamelCase_ ) for row in dataset] for col in keys}
lowerCAmelCase__ = pa.Table.from_pydict(lowerCamelCase_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(lowerCamelCase_ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(lowerCamelCase_ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(lowerCamelCase_ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCamelCase_ )
batch_idx += 1
| 98
|
'''simple docstring'''
from math import pi, sqrt, tan
def _snake_case ( A ) -> float:
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _snake_case ( A , A , A ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _snake_case ( A ) -> float:
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _snake_case ( A ) -> float:
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _snake_case ( A , A ) -> float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _snake_case ( A , A , A ) -> float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
lowerCAmelCase__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _snake_case ( A , A ) -> float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _snake_case ( A , A ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(A , 2 ) * torus_radius * tube_radius
def _snake_case ( A , A ) -> float:
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _snake_case ( A ) -> float:
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _snake_case ( A , A ) -> float:
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _snake_case ( A , A , A ) -> float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
lowerCAmelCase__ = (sidea + sidea + sidea) / 2
lowerCAmelCase__ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _snake_case ( A , A ) -> float:
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _snake_case ( A , A , A ) -> float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _snake_case ( A ) -> float:
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _snake_case ( A , A ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _snake_case ( A , A ) -> float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _snake_case ( A , A ) -> float:
if not isinstance(A , A ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 98
| 1
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class a_ ( lowercase__ ):
def __init__( self , SCREAMING_SNAKE_CASE="" , SCREAMING_SNAKE_CASE="train" ) -> Any:
"""simple docstring"""
assert os.path.isdir(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = os.listdir(SCREAMING_SNAKE_CASE )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not os.path.isfile(SCREAMING_SNAKE_CASE ):
continue
self.documents.append(SCREAMING_SNAKE_CASE )
def __len__( self ) -> Tuple:
"""simple docstring"""
return len(self.documents )
def __getitem__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.documents[idx]
SCREAMING_SNAKE_CASE_ = document_path.split('/' )[-1]
with open(SCREAMING_SNAKE_CASE , encoding='utf-8' ) as source:
SCREAMING_SNAKE_CASE_ = source.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = process_story(SCREAMING_SNAKE_CASE )
return document_name, story_lines, summary_lines
def lowercase ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = list(filter(lambda SCREAMING_SNAKE_CASE : len(__SCREAMING_SNAKE_CASE ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
SCREAMING_SNAKE_CASE_ = [_add_missing_period(__SCREAMING_SNAKE_CASE ) for line in nonempty_lines]
# gather article lines
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = deque(__SCREAMING_SNAKE_CASE )
while True:
try:
SCREAMING_SNAKE_CASE_ = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(__SCREAMING_SNAKE_CASE )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
SCREAMING_SNAKE_CASE_ = list(filter(lambda SCREAMING_SNAKE_CASE : not t.startswith('@highlight' ) , __SCREAMING_SNAKE_CASE ) )
return story_lines, summary_lines
def lowercase ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = ['.', '!', '?', '...', '\'', '`', '\"', '\u2019', '\u2019', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if len(__SCREAMING_SNAKE_CASE ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__SCREAMING_SNAKE_CASE )) )
return sequence
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = torch.ones_like(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = sequence == pad_token_id
SCREAMING_SNAKE_CASE_ = 0
return mask
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [tokenizer.encode(__SCREAMING_SNAKE_CASE ) for line in story_lines]
SCREAMING_SNAKE_CASE_ = [token for sentence in story_lines_token_ids for token in sentence]
SCREAMING_SNAKE_CASE_ = [tokenizer.encode(__SCREAMING_SNAKE_CASE ) for line in summary_lines]
SCREAMING_SNAKE_CASE_ = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = []
for sequence in batch:
SCREAMING_SNAKE_CASE_ = -1
SCREAMING_SNAKE_CASE_ = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__SCREAMING_SNAKE_CASE )
return torch.tensor(__SCREAMING_SNAKE_CASE )
| 205
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=[30, 30] , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=10 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=None , lowerCAmelCase=8 , lowerCAmelCase=10 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = n_targets
UpperCAmelCase_ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
UpperCAmelCase_ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
UpperCAmelCase_ = num_patches + 1 + self.num_detection_tokens
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
UpperCAmelCase_ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCAmelCase_ = []
for i in range(self.batch_size ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=lowerCAmelCase )
UpperCAmelCase_ = torch.rand(self.n_targets , 4 , device=lowerCAmelCase )
labels.append(lowerCAmelCase )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = YolosModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = YolosForObjectDetection(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(pixel_values=lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
UpperCAmelCase_ = model(pixel_values=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def A__ ( self ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase_ : str = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase_ : str = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Dict = False
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
UpperCAmelCase_ = super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
UpperCAmelCase_ = []
for i in range(self.model_tester.batch_size ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = torch.ones(
size=(self.model_tester.n_targets,) , device=lowerCAmelCase , dtype=torch.long )
UpperCAmelCase_ = torch.ones(
self.model_tester.n_targets , 4 , device=lowerCAmelCase , dtype=torch.float )
labels.append(lowerCAmelCase )
UpperCAmelCase_ = labels
return inputs_dict
def A__ ( self ):
UpperCAmelCase_ = YolosModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def A__ ( self ):
self.config_tester.run_common_tests()
def A__ ( self ):
# YOLOS does not use inputs_embeds
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
# in YOLOS, the seq_len is different
UpperCAmelCase_ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
UpperCAmelCase_ = len(lowerCAmelCase )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = 1
self.assertEqual(out_len + added_hidden_states , len(lowerCAmelCase ) )
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# YOLOS has a different seq_length
UpperCAmelCase_ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = YolosModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def snake_case__ ( ) -> Optional[Any]:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(inputs.pixel_values )
# verify outputs
UpperCAmelCase_ = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=lowerCAmelCase , )
UpperCAmelCase_ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCAmelCase , atol=1e-4 ) )
# verify postprocessing
UpperCAmelCase_ = image_processor.post_process_object_detection(
lowerCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
UpperCAmelCase_ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(lowerCAmelCase )
UpperCAmelCase_ = [75, 75, 17, 63, 17]
UpperCAmelCase_ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(lowerCAmelCase )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , lowerCAmelCase , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , lowerCAmelCase )
self.assertTrue(torch.allclose(results["boxes"][0, :] , lowerCAmelCase ) )
| 579
| 0
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
set_seed(7_70)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
SCREAMING_SNAKE_CASE__ : int = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
SCREAMING_SNAKE_CASE__ : List[str] = os.path.dirname(os.path.abspath(__file__))
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(os.path.expanduser("""~"""), """.cache""")
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
SCREAMING_SNAKE_CASE_ :Optional[Any] = model_type
if use_small:
key += "_small"
return os.path.join(_SCREAMING_SNAKE_CASE , REMOTE_MODEL_PATHS[key]['file_name'] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
hf_hub_download(repo_id=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , local_dir=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ):
if model_type == "text":
SCREAMING_SNAKE_CASE_ :Tuple = BarkSemanticModel
SCREAMING_SNAKE_CASE_ :Union[str, Any] = BarkSemanticConfig
SCREAMING_SNAKE_CASE_ :Any = BarkSemanticGenerationConfig
elif model_type == "coarse":
SCREAMING_SNAKE_CASE_ :Union[str, Any] = BarkCoarseModel
SCREAMING_SNAKE_CASE_ :Dict = BarkCoarseConfig
SCREAMING_SNAKE_CASE_ :str = BarkCoarseGenerationConfig
elif model_type == "fine":
SCREAMING_SNAKE_CASE_ :Dict = BarkFineModel
SCREAMING_SNAKE_CASE_ :Optional[Any] = BarkFineConfig
SCREAMING_SNAKE_CASE_ :Tuple = BarkFineGenerationConfig
else:
raise NotImplementedError()
SCREAMING_SNAKE_CASE_ :List[str] = F'{model_type}_small' if use_small else model_type
SCREAMING_SNAKE_CASE_ :List[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info['repo_id'] , model_info['file_name'] )
SCREAMING_SNAKE_CASE_ :List[Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
# this is a hack
SCREAMING_SNAKE_CASE_ :str = checkpoint['model_args']
if "input_vocab_size" not in model_args:
SCREAMING_SNAKE_CASE_ :int = model_args['vocab_size']
SCREAMING_SNAKE_CASE_ :int = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
SCREAMING_SNAKE_CASE_ :Optional[int] = model_args.pop('n_head' )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = model_args.pop('n_embd' )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = model_args.pop('n_layer' )
SCREAMING_SNAKE_CASE_ :Tuple = ConfigClass(**checkpoint['model_args'] )
SCREAMING_SNAKE_CASE_ :Optional[Any] = ModelClass(config=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = GenerationConfigClass()
SCREAMING_SNAKE_CASE_ :Optional[Any] = model_generation_config
SCREAMING_SNAKE_CASE_ :str = checkpoint['model']
# fixup checkpoint
SCREAMING_SNAKE_CASE_ :Tuple = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(_SCREAMING_SNAKE_CASE ):
# replace part of the key with corresponding layer name in HF implementation
SCREAMING_SNAKE_CASE_ :str = k[len(_SCREAMING_SNAKE_CASE ) :]
for old_layer_name in new_layer_name_dict:
SCREAMING_SNAKE_CASE_ :Tuple = new_k.replace(_SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] )
SCREAMING_SNAKE_CASE_ :Optional[Any] = state_dict.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = set(state_dict.keys() ) - set(model.state_dict().keys() )
SCREAMING_SNAKE_CASE_ :Tuple = {k for k in extra_keys if not k.endswith('.attn.bias' )}
SCREAMING_SNAKE_CASE_ :List[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F'extra keys found: {extra_keys}' )
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F'missing keys: {missing_keys}' )
model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = checkpoint['best_val_loss'].item()
logger.info(F'model loaded: {round(n_params/1E6 , 1 )}M params, {round(_SCREAMING_SNAKE_CASE , 3 )} loss' )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
del checkpoint, state_dict
return model
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
SCREAMING_SNAKE_CASE_ :int = 'cpu' # do conversion on cpu
SCREAMING_SNAKE_CASE_ :int = _get_ckpt_path(_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = _load_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
# load bark initial model
SCREAMING_SNAKE_CASE_ :Any = _bark_load_model(_SCREAMING_SNAKE_CASE , 'cpu' , model_type=_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
if model_type == "text":
SCREAMING_SNAKE_CASE_ :str = bark_model['model']
if model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
SCREAMING_SNAKE_CASE_ :Tuple = 5
SCREAMING_SNAKE_CASE_ :Optional[Any] = 10
if model_type in ["text", "coarse"]:
SCREAMING_SNAKE_CASE_ :str = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = bark_model(_SCREAMING_SNAKE_CASE )[0]
SCREAMING_SNAKE_CASE_ :Any = model(_SCREAMING_SNAKE_CASE )
# take last logits
SCREAMING_SNAKE_CASE_ :int = output_new_model_total.logits[:, [-1], :]
else:
SCREAMING_SNAKE_CASE_ :Optional[Any] = 3
SCREAMING_SNAKE_CASE_ :Any = 8
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
SCREAMING_SNAKE_CASE_ :int = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = bark_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
SCREAMING_SNAKE_CASE_ :Any = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = BarkSemanticConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) )
SCREAMING_SNAKE_CASE_ :Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) )
SCREAMING_SNAKE_CASE_ :Optional[Any] = BarkFineConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) )
SCREAMING_SNAKE_CASE_ :str = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
SCREAMING_SNAKE_CASE_ :List[Any] = BarkSemanticModel.from_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = BarkCoarseModel.from_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = BarkFineModel.from_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = EncodecModel.from_pretrained('facebook/encodec_24khz' )
SCREAMING_SNAKE_CASE_ :List[Any] = BarkConfig.from_sub_model_configs(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
SCREAMING_SNAKE_CASE_ :Optional[int] = BarkModel(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = semantic
SCREAMING_SNAKE_CASE_ :Union[str, Any] = coarseAcoustic
SCREAMING_SNAKE_CASE_ :List[str] = fineAcoustic
SCREAMING_SNAKE_CASE_ :str = codec
SCREAMING_SNAKE_CASE_ :str = bark_generation_config
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
bark.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id=_SCREAMING_SNAKE_CASE , push_to_hub=_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 707
|
'''simple docstring'''
from PIL import Image
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ :List[Any] = (259 * (level + 255)) / (255 * (259 - level))
def contrast(SCREAMING_SNAKE_CASE ) -> int:
return int(128 + factor * (c - 128) )
return img.point(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
SCREAMING_SNAKE_CASE__ : int = change_contrast(img, 1_70)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 233
| 0
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class a__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__A= 0
__A= [0]
__A= [0]
__A= len(lowerCAmelCase_ )
self.assertEqual(k.knapsack(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , 0 )
__A= [60]
__A= [10]
__A= len(lowerCAmelCase_ )
self.assertEqual(k.knapsack(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , 0 )
def lowerCAmelCase ( self : str ) -> List[Any]:
__A= 3
__A= [1, 2, 3]
__A= [3, 2, 1]
__A= len(lowerCAmelCase_ )
self.assertEqual(k.knapsack(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , 5 )
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
__A= 50
__A= [60, 100, 120]
__A= [10, 20, 30]
__A= len(lowerCAmelCase_ )
self.assertEqual(k.knapsack(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , 220 )
if __name__ == "__main__":
unittest.main()
| 186
|
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCAmelCase__ = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
UpperCAmelCase__ = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
UpperCAmelCase__ = '''|'''.join(sys.argv[1:])
UpperCAmelCase__ = re.compile(rF"""^({joined_dirs}).*?\.py$""")
UpperCAmelCase__ = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 186
| 1
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Union[str, Any] = ''''''
a_ : Dict = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a_ : Optional[Any] = None # compression type in fsspec. ex: "gzip"
a_ : Tuple = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__(self , UpperCAmelCase = "" , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase):
'''simple docstring'''
super().__init__(self , **UpperCAmelCase)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__UpperCAmelCase =fsspec.open(
UpperCAmelCase , mode='''rb''' , protocol=UpperCAmelCase , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {}), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__UpperCAmelCase =os.path.basename(self.file.path.split('''::''')[0])
__UpperCAmelCase =(
self.compressed_name[: self.compressed_name.rindex('''.''')]
if '.' in self.compressed_name
else self.compressed_name
)
__UpperCAmelCase =None
@classmethod
def A__ (cls , UpperCAmelCase):
'''simple docstring'''
return super()._strip_protocol(UpperCAmelCase).lstrip('''/''')
def A__ (self):
'''simple docstring'''
if self.dir_cache is None:
__UpperCAmelCase ={**self.file.fs.info(self.file.path), 'name': self.uncompressed_name}
__UpperCAmelCase ={f['name']: f}
def A__ (self , UpperCAmelCase):
'''simple docstring'''
return self.file.open().read()
def A__ (self , UpperCAmelCase , UpperCAmelCase = "rb" , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase , ):
'''simple docstring'''
__UpperCAmelCase =self._strip_protocol(UpperCAmelCase)
if mode != "rb":
raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'""")
return self.file.open()
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : int = '''bz2'''
a_ : Any = '''bz2'''
a_ : Union[str, Any] = '''.bz2'''
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : int = '''gzip'''
a_ : List[str] = '''gzip'''
a_ : Optional[Any] = '''.gz'''
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Union[str, Any] = '''lz4'''
a_ : List[Any] = '''lz4'''
a_ : Optional[Any] = '''.lz4'''
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Optional[Any] = '''xz'''
a_ : Tuple = '''xz'''
a_ : str = '''.xz'''
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : List[str] = '''zstd'''
a_ : Union[str, Any] = '''zstd'''
a_ : str = '''.zst'''
def __init__(self , UpperCAmelCase , UpperCAmelCase = "rb" , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = DEFAULT_BLOCK_SIZE , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
fo=UpperCAmelCase , mode=UpperCAmelCase , target_protocol=UpperCAmelCase , target_options=UpperCAmelCase , block_size=UpperCAmelCase , **UpperCAmelCase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__UpperCAmelCase =self.file.__enter__
class _SCREAMING_SNAKE_CASE :
def __init__(self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =file_
def __enter__(self):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__(self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
self._file.__exit__(*UpperCAmelCase , **UpperCAmelCase)
def __iter__(self):
'''simple docstring'''
return iter(self._file)
def A__ (self):
'''simple docstring'''
return next(self._file)
def __getattr__(self , UpperCAmelCase):
'''simple docstring'''
return getattr(self._file , UpperCAmelCase)
def fixed_enter(*UpperCAmelCase , **UpperCAmelCase):
return WrappedFile(_enter(*UpperCAmelCase , **UpperCAmelCase))
__UpperCAmelCase =fixed_enter
| 703
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ ) -> Optional[int]:
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> str:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase =JsonDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =features.copy() if features else default_expected_features
__UpperCAmelCase =(
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase =JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase =features.copy() if features else default_expected_features
__UpperCAmelCase =(
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase =JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ ) -> Optional[Any]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase ={'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase =features.copy()
__UpperCAmelCase =(
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase =JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Dict:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =JsonDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
if issubclass(snake_case__ , snake_case__ ):
__UpperCAmelCase =jsonl_path
elif issubclass(snake_case__ , snake_case__ ):
__UpperCAmelCase =[jsonl_path]
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =JsonDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Dict:
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
__UpperCAmelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase =JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =features.copy() if features else default_expected_features
__UpperCAmelCase =(
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase =JsonDatasetReader({'''train''': jsonl_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
if split:
__UpperCAmelCase ={split: jsonl_path}
else:
__UpperCAmelCase ='''train'''
__UpperCAmelCase ={'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase =tmp_path / '''cache'''
__UpperCAmelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase =JsonDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> int:
return json.load(snake_case__ )
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Union[str, Any]:
return [json.loads(snake_case__ ) for line in buffer]
class _SCREAMING_SNAKE_CASE :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)])
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , lines=UpperCAmelCase).write()
buffer.seek(0)
__UpperCAmelCase =load_json_function(UpperCAmelCase)
assert isinstance(UpperCAmelCase , UpperCAmelCase)
assert isinstance(exported_content[0] , UpperCAmelCase)
assert len(UpperCAmelCase) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789'''), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , lines=UpperCAmelCase , orient=UpperCAmelCase).write()
buffer.seek(0)
__UpperCAmelCase =load_json(UpperCAmelCase)
assert isinstance(UpperCAmelCase , UpperCAmelCase)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCAmelCase , '''keys''') and not hasattr(exported_content[0] , '''keys''')
if len_at:
assert len(exported_content[len_at]) == 1_0
else:
assert len(UpperCAmelCase) == 1_0
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)])
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , lines=UpperCAmelCase , num_proc=2).write()
buffer.seek(0)
__UpperCAmelCase =load_json_function(UpperCAmelCase)
assert isinstance(UpperCAmelCase , UpperCAmelCase)
assert isinstance(exported_content[0] , UpperCAmelCase)
assert len(UpperCAmelCase) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789'''), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , lines=UpperCAmelCase , orient=UpperCAmelCase , num_proc=2).write()
buffer.seek(0)
__UpperCAmelCase =load_json(UpperCAmelCase)
assert isinstance(UpperCAmelCase , UpperCAmelCase)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCAmelCase , '''keys''') and not hasattr(exported_content[0] , '''keys''')
if len_at:
assert len(exported_content[len_at]) == 1_0
else:
assert len(UpperCAmelCase) == 1_0
def A__ (self , UpperCAmelCase):
'''simple docstring'''
with pytest.raises(UpperCAmelCase):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')])
def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =tmp_path_factory.mktemp('''data''') / f"""test.json.{extension}"""
__UpperCAmelCase =str(shared_datadir / f"""test_file.json.{extension}""")
JsonDatasetWriter(UpperCAmelCase , UpperCAmelCase , compression=UpperCAmelCase).write()
with fsspec.open(UpperCAmelCase , '''rb''' , compression='''infer''') as f:
__UpperCAmelCase =f.read()
with fsspec.open(UpperCAmelCase , '''rb''' , compression='''infer''') as f:
__UpperCAmelCase =f.read()
assert exported_content == original_content
| 142
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : int = 'codegen'
__lowerCamelCase : Dict = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__(self , A=50_400 , A=2_048 , A=2_048 , A=4_096 , A=28 , A=16 , A=64 , A=None , A="gelu_new" , A=0.0 , A=0.0 , A=0.0 , A=1E-5 , A=0.02 , A=True , A=50_256 , A=50_256 , A=False , **A , ) -> Optional[Any]:
"""simple docstring"""
_a = vocab_size
_a = n_ctx
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = rotary_dim
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = use_cache
_a = bos_token_id
_a = eos_token_id
super().__init__(
bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A )
class __A ( A ):
'''simple docstring'''
def __init__(self , A , A = "default" , A = None , A = False , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(A , task=A , patching_specs=A , use_past=A )
if not getattr(self._config , '''pad_token_id''' , A ):
# TODO: how to do that better?
_a = 0
@property
def a__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
_a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(A , direction='''inputs''' )
_a = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def a__ (self ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def a__ (self ) -> int:
"""simple docstring"""
return self._config.n_head
def a__ (self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_a = super(A , self ).generate_dummy_inputs(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
# We need to order the input in the way they appears in the forward()
_a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(self.num_layers )
]
_a = common_inputs['''attention_mask''']
if self.use_past:
_a = ordered_inputs['''attention_mask'''].dtype
_a = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(A , A , dtype=A )] , dim=1 )
return ordered_inputs
@property
def a__ (self ) -> int:
"""simple docstring"""
return 13
| 11
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = (DDPMScheduler,)
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowercase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_lowercase )
| 5
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = False ) -> float:
'''simple docstring'''
if not arr:
return 0
__SCREAMING_SNAKE_CASE = 0 if allow_empty_subarrays else float("""-inf""" )
__SCREAMING_SNAKE_CASE = 0.0
for num in arr:
__SCREAMING_SNAKE_CASE = max(0 if allow_empty_subarrays else num , curr_sum + num )
__SCREAMING_SNAKE_CASE = max(__UpperCAmelCase , __UpperCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
a = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 701
|
'''simple docstring'''
import sys
from collections import defaultdict
class __a :
def __init__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.node_position[vertex]
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pos
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
__SCREAMING_SNAKE_CASE = 2 * start + 2
if heap[smallest_child] < heap[start]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
heap[start],
positions[start],
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa
__SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,lowerCamelCase )
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = position[index]
while index != 0:
__SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__SCREAMING_SNAKE_CASE = heap[parent]
__SCREAMING_SNAKE_CASE = position[parent]
self.set_position(position[parent] ,lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,lowerCamelCase )
break
__SCREAMING_SNAKE_CASE = parent
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,0 )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1
for i in range(lowerCamelCase ,-1 ,-1 ):
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = positions[0]
__SCREAMING_SNAKE_CASE = sys.maxsize
self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase )
return temp
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Heap()
__SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE = []
for vertex in range(len(__UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(__UpperCAmelCase )
heap.node_position.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = distance
heap.heapify(__UpperCAmelCase , __UpperCAmelCase )
for _ in range(1 , len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__UpperCAmelCase )]
):
__SCREAMING_SNAKE_CASE = distance
heap.bottom_to_top(
__UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a = int(input("Enter number of edges: ").strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase ( unittest.TestCase ):
def snake_case__ ( self :Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = BlipImageProcessor()
SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
SCREAMING_SNAKE_CASE = BlipaProcessor(lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def snake_case__ ( self :Optional[Any] , **lowercase :Tuple ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def snake_case__ ( self :List[Any] , **lowercase :Any ) -> Union[str, Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def snake_case__ ( self :Dict ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self :Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
SCREAMING_SNAKE_CASE = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def snake_case__ ( self :List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(lowercase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE = processor(images=lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self :int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
SCREAMING_SNAKE_CASE = '''lower newer'''
SCREAMING_SNAKE_CASE = processor(text=lowercase )
SCREAMING_SNAKE_CASE = tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
SCREAMING_SNAKE_CASE = '''lower newer'''
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def snake_case__ ( self :Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(lowercase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def snake_case__ ( self :Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase )
SCREAMING_SNAKE_CASE = '''lower newer'''
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=lowercase , images=lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 201
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 201
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = LDMTextToImagePipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
lowercase_ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ = False
def a_ ( self : Union[str, Any]):
"""simple docstring"""
torch.manual_seed(0)
__UpperCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__UpperCAmelCase : Dict = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0)
__UpperCAmelCase : Optional[int] = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0)
__UpperCAmelCase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
__UpperCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def a_ ( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple=0):
"""simple docstring"""
if str(UpperCamelCase_).startswith("mps"):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_)
else:
__UpperCAmelCase : Tuple = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__UpperCAmelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = LDMTextToImagePipeline(**UpperCamelCase_)
pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : int = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Dict = pipe(**UpperCamelCase_).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__UpperCAmelCase : List[str] = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : int=torch.floataa , UpperCamelCase_ : Dict=0):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = torch.manual_seed(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = np.random.RandomState(UpperCamelCase_).standard_normal((1, 4, 32, 32))
__UpperCAmelCase : str = torch.from_numpy(UpperCamelCase_).to(device=UpperCamelCase_ , dtype=UpperCamelCase_)
__UpperCAmelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : str = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = self.get_inputs(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = pipe(**UpperCamelCase_).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase : List[Any] = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
__UpperCAmelCase : List[Any] = np.abs(expected_slice - image_slice).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[Any]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=torch.floataa , UpperCamelCase_ : Optional[int]=0):
"""simple docstring"""
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_)
__UpperCAmelCase : Tuple = np.random.RandomState(UpperCamelCase_).standard_normal((1, 4, 32, 32))
__UpperCAmelCase : Optional[Any] = torch.from_numpy(UpperCamelCase_).to(device=UpperCamelCase_ , dtype=UpperCamelCase_)
__UpperCAmelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : str = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = self.get_inputs(UpperCamelCase_)
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images[0]
__UpperCAmelCase : int = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
__UpperCAmelCase : int = np.abs(expected_image - image).max()
assert max_diff < 1e-3
| 710
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/"))
__UpperCAmelCase : Tuple = self.transformer_dir
shutil.copy(
os.path.join(UpperCamelCase_ , "src/transformers/models/bert/modeling_bert.py") , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py") , )
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = "src/transformers"
shutil.rmtree(self.transformer_dir)
def a_ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str]=None):
"""simple docstring"""
__UpperCAmelCase : str = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
__UpperCAmelCase : str = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
__UpperCAmelCase : Optional[int] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119)
__UpperCAmelCase : Dict = black.format_str(UpperCamelCase_ , mode=UpperCamelCase_)
__UpperCAmelCase : Dict = os.path.join(self.transformer_dir , "new_code.py")
with open(UpperCamelCase_ , "w" , newline="\n") as f:
f.write(UpperCamelCase_)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase_)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase_)
with open(UpperCamelCase_ , "r") as f:
self.assertTrue(f.read() , UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Any = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Optional[int]):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , UpperCamelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , UpperCamelCase_) , )
# Copy consistency with a really long name
__UpperCAmelCase : Optional[Any] = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub("Bert" , UpperCamelCase_ , UpperCamelCase_) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , UpperCamelCase_ , overwrite_result=re.sub("Bert" , "TestModel" , UpperCamelCase_) , )
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : List[str] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
__UpperCAmelCase : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
__UpperCAmelCase : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__UpperCAmelCase : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
__UpperCAmelCase , __UpperCAmelCase : Any = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
self.assertFalse(UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase , __UpperCAmelCase : int = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(UpperCamelCase_)
__UpperCAmelCase : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
__UpperCAmelCase : Optional[int] = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__UpperCAmelCase : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__UpperCAmelCase , __UpperCAmelCase : List[str] = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
# Check if the model link is synchronized.
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
| 487
| 0
|
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case = get_tests_dir('''fixtures/dummy-config.json''')
class UpperCAmelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = 0
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_snake_case = os.path.join(__lowerCamelCase , '''fake-roberta''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoConfig.register('''model''' , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoConfig.register('''bert''' , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_snake_case = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
_snake_case = AutoConfig.from_pretrained('''bert-base''' )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
__lowerCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
__lowerCamelCase , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__lowerCamelCase )
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : List[Any] = '''new-model'''
try:
AutoConfig.register('''new-model''' , __lowerCamelCase )
# If remote code is not set, the default is to use local
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 103
|
"""simple docstring"""
from copy import deepcopy
class UpperCAmelCase :
def __init__( self : Optional[Any] , __lowerCamelCase : list[int] | None = None , __lowerCamelCase : int | None = None ):
"""simple docstring"""
if arr is None and size is not None:
_snake_case = size
_snake_case = [0] * size
elif arr is not None:
self.init(__lowerCamelCase )
else:
raise ValueError('''Either arr or size must be specified''' )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : list[int] ):
"""simple docstring"""
_snake_case = len(__lowerCamelCase )
_snake_case = deepcopy(__lowerCamelCase )
for i in range(1 , self.size ):
_snake_case = self.next_(__lowerCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
_snake_case = self.next_(__lowerCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index - (index & (-index))
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
_snake_case = self.next_(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
self.add(__lowerCamelCase , value - self.get(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
if right == 0:
return 0
_snake_case = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
_snake_case = self.prev(__lowerCamelCase )
return result
def __UpperCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
return self.prefix(__lowerCamelCase ) - self.prefix(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int ):
"""simple docstring"""
return self.query(__lowerCamelCase , index + 1 )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : int ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
_snake_case = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
_snake_case = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A : Any = 2_5_0_0_0_4
A : Dict = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class A (SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = MBartTokenizer
__lowerCamelCase : str = MBartTokenizerFast
__lowerCamelCase : List[Any] = True
__lowerCamelCase : List[Any] = True
def a_ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A__ = MBartTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def a_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
A__ = MBartTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase )
A__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A__ = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self : List[str] ) -> str:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A__ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
A__ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class A (unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = '''facebook/mbart-large-en-ro'''
__lowerCamelCase : List[Any] = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
__lowerCamelCase : str = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
__lowerCamelCase : int = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def a_ ( cls : Optional[Any] ) -> List[str]:
"""simple docstring"""
A__ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
A__ = 1
return cls
def a_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
def a_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
def a_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids )
A__ = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
A__ = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __lowerCAmelCase )
A__ = 10
A__ = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
def a_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
def a_ ( self : List[str] ) -> int:
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCAmelCase )
A__ = MBartTokenizer.from_pretrained(__lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase )
@require_torch
def a_ ( self : List[str] ) -> int:
"""simple docstring"""
A__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , return_tensors="""pt""" )
A__ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def a_ ( self : int ) -> List[str]:
"""simple docstring"""
A__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
A__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def a_ ( self : Dict ) -> int:
"""simple docstring"""
A__ = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors="""pt""" )
A__ = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=10 , return_tensors="""pt""" )
A__ = targets["""input_ids"""]
A__ = shift_tokens_right(__lowerCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
A__ = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
| 247
|
from ..utils import DummyObject, requires_backends
class A (metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Any = ['''keras_nlp''']
def __init__( self : Any , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""keras_nlp"""] )
| 247
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : Tuple = """encoder-decoder"""
__a : Tuple = True
def __init__( self, **snake_case__ ) -> Tuple:
"""simple docstring"""
super().__init__(**snake_case__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowercase_ : Optional[Any] = kwargs.pop("""encoder""" )
lowercase_ : List[str] = encoder_config.pop("""model_type""" )
lowercase_ : Optional[int] = kwargs.pop("""decoder""" )
lowercase_ : Optional[Any] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
lowercase_ : int = AutoConfig.for_model(snake_case__, **snake_case__ )
lowercase_ : Union[str, Any] = AutoConfig.for_model(snake_case__, **snake_case__ )
lowercase_ : List[str] = True
@classmethod
def snake_case__ ( cls, snake_case__, snake_case__, **snake_case__ ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
lowercase_ : List[Any] = True
lowercase_ : Optional[Any] = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **snake_case__ )
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : str = copy.deepcopy(self.__dict__ )
lowercase_ : Optional[Any] = self.encoder.to_dict()
lowercase_ : Union[str, Any] = self.decoder.to_dict()
lowercase_ : Dict = self.__class__.model_type
return output
| 458
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : Optional[int] = """canine"""
def __init__( self, snake_case__=7_68, snake_case__=12, snake_case__=12, snake_case__=30_72, snake_case__="gelu", snake_case__=0.1, snake_case__=0.1, snake_case__=1_63_84, snake_case__=16, snake_case__=0.02, snake_case__=1E-12, snake_case__=0, snake_case__=0XE_0_0_0, snake_case__=0XE_0_0_1, snake_case__=4, snake_case__=4, snake_case__=8, snake_case__=1_63_84, snake_case__=1_28, **snake_case__, ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=snake_case__, bos_token_id=snake_case__, eos_token_id=snake_case__, **snake_case__ )
lowercase_ : int = max_position_embeddings
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : List[Any] = intermediate_size
lowercase_ : Optional[Any] = hidden_act
lowercase_ : List[str] = hidden_dropout_prob
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : Optional[Any] = type_vocab_size
lowercase_ : Dict = layer_norm_eps
# Character config:
lowercase_ : Optional[int] = downsampling_rate
lowercase_ : List[str] = upsampling_kernel_size
lowercase_ : int = num_hash_functions
lowercase_ : List[str] = num_hash_buckets
lowercase_ : List[str] = local_transformer_stride
| 458
| 1
|
_lowerCamelCase : Any = 256
# Modulus to hash a string
_lowerCamelCase : Dict = 1_000_003
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
SCREAMING_SNAKE_CASE : int = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : int = len(__lowerCAmelCase )
if p_len > t_len:
return False
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : Tuple = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
SCREAMING_SNAKE_CASE : Optional[int] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
SCREAMING_SNAKE_CASE : Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
SCREAMING_SNAKE_CASE : Optional[int] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __a ( ) -> None:
SCREAMING_SNAKE_CASE : Union[str, Any] = 'abc1abc12'
SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
SCREAMING_SNAKE_CASE : Tuple = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(__lowerCAmelCase , __lowerCAmelCase ) and not rabin_karp(__lowerCAmelCase , __lowerCAmelCase )
# Test 2)
SCREAMING_SNAKE_CASE : Optional[int] = 'ABABX'
SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX'
assert rabin_karp(__lowerCAmelCase , __lowerCAmelCase )
# Test 3)
SCREAMING_SNAKE_CASE : Optional[Any] = 'AAAB'
SCREAMING_SNAKE_CASE : Dict = 'ABAAAAAB'
assert rabin_karp(__lowerCAmelCase , __lowerCAmelCase )
# Test 4)
SCREAMING_SNAKE_CASE : Union[str, Any] = 'abcdabcy'
SCREAMING_SNAKE_CASE : Any = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(__lowerCAmelCase , __lowerCAmelCase )
# Test 5)
SCREAMING_SNAKE_CASE : List[Any] = 'Lü'
SCREAMING_SNAKE_CASE : Dict = 'Lüsai'
assert rabin_karp(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = 'Lue'
assert not rabin_karp(__lowerCAmelCase , __lowerCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 721
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 'transfo-xl'
UpperCAmelCase : List[str] = ['mems']
UpperCAmelCase : Optional[Any] = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[int] , snake_case : Tuple=267735 , snake_case : Optional[Any]=[20000, 40000, 200000] , snake_case : List[Any]=1024 , snake_case : List[Any]=1024 , snake_case : List[Any]=16 , snake_case : int=64 , snake_case : Optional[int]=4096 , snake_case : Union[str, Any]=4 , snake_case : List[str]=False , snake_case : int=18 , snake_case : List[Any]=1600 , snake_case : Union[str, Any]=1000 , snake_case : List[Any]=True , snake_case : Dict=True , snake_case : Optional[Any]=0 , snake_case : Dict=-1 , snake_case : List[Any]=True , snake_case : Any=0.1 , snake_case : List[Any]=0.0 , snake_case : List[str]=True , snake_case : Optional[Any]="normal" , snake_case : Optional[Any]=0.01 , snake_case : Union[str, Any]=0.01 , snake_case : List[str]=0.02 , snake_case : List[str]=1E-5 , snake_case : Optional[int]=0 , **snake_case : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Any = []
self.cutoffs.extend(snake_case )
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE : Tuple = [False] + [True] * len(self.cutoffs )
else:
SCREAMING_SNAKE_CASE : List[Any] = [False] + [False] * len(self.cutoffs )
SCREAMING_SNAKE_CASE : Tuple = d_model
SCREAMING_SNAKE_CASE : Any = d_embed
SCREAMING_SNAKE_CASE : Tuple = d_head
SCREAMING_SNAKE_CASE : Union[str, Any] = d_inner
SCREAMING_SNAKE_CASE : Tuple = div_val
SCREAMING_SNAKE_CASE : int = pre_lnorm
SCREAMING_SNAKE_CASE : Tuple = n_layer
SCREAMING_SNAKE_CASE : List[str] = n_head
SCREAMING_SNAKE_CASE : Dict = mem_len
SCREAMING_SNAKE_CASE : Dict = same_length
SCREAMING_SNAKE_CASE : Union[str, Any] = attn_type
SCREAMING_SNAKE_CASE : str = clamp_len
SCREAMING_SNAKE_CASE : Any = sample_softmax
SCREAMING_SNAKE_CASE : Optional[int] = adaptive
SCREAMING_SNAKE_CASE : Optional[int] = dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = dropatt
SCREAMING_SNAKE_CASE : List[str] = untie_r
SCREAMING_SNAKE_CASE : Union[str, Any] = init
SCREAMING_SNAKE_CASE : Optional[int] = init_range
SCREAMING_SNAKE_CASE : Tuple = proj_init_std
SCREAMING_SNAKE_CASE : str = init_std
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
super().__init__(eos_token_id=snake_case , **snake_case )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def lowerCamelCase_ ( self : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 308
| 0
|
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase_ : Optional[int] = list[list[int]]
# assigning initial values to the grid
lowerCAmelCase_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
lowerCAmelCase_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if location := find_empty_location(lowerCAmelCase ):
UpperCAmelCase , UpperCAmelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = digit
if sudoku(lowerCAmelCase ) is not None:
return grid
UpperCAmelCase = 0
return None
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
for row in grid:
for cell in row:
print(lowerCAmelCase , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
lowerCAmelCase_ : Optional[int] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 673
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Optional[int] = False
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return TrainCommand(lowerCAmelCase )
class UpperCamelCase_ ( a_ ):
@staticmethod
def UpperCamelCase_ ( snake_case__ ) -> int:
"""simple docstring"""
UpperCAmelCase = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=snake_case__ , required=snake_case__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=snake_case__ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=snake_case__ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=snake_case__ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=snake_case__ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=snake_case__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=snake_case__ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=snake_case__ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=snake_case__ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=snake_case__ , default=32 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=snake_case__ , default=64 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=snake_case__ , default=3e-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=snake_case__ , default=1e-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=snake_case__ )
def __init__( self , snake_case__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = logging.get_logger("""transformers-cli/training""" )
UpperCAmelCase = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=snake_case__ )
UpperCAmelCase = args.output
UpperCAmelCase = args.column_label
UpperCAmelCase = args.column_text
UpperCAmelCase = args.column_id
self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
UpperCAmelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'''Loading dataset from {args.train_data}''' )
UpperCAmelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase = None
if args.validation_data:
self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
UpperCAmelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase = args.validation_split
UpperCAmelCase = args.train_batch_size
UpperCAmelCase = args.valid_batch_size
UpperCAmelCase = args.learning_rate
UpperCAmelCase = args.adam_epsilon
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
raise NotImplementedError
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 673
| 1
|
__UpperCamelCase : Optional[Any] = 256
# Modulus to hash a string
__UpperCamelCase : Any = 1000003
def _UpperCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : str ):
"""simple docstring"""
__lowerCamelCase : str = len(UpperCAmelCase )
__lowerCamelCase : List[Any] = len(UpperCAmelCase )
if p_len > t_len:
return False
__lowerCamelCase : List[Any] = 0
__lowerCamelCase : str = 0
__lowerCamelCase : Any = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
__lowerCamelCase : Dict = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__lowerCamelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__lowerCamelCase : Dict = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__lowerCamelCase : List[str] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCamelCase : List[Any] = """abc1abc12"""
__lowerCamelCase : Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
__lowerCamelCase : int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
__lowerCamelCase : Optional[int] = """ABABX"""
__lowerCamelCase : Tuple = """ABABZABABYABABX"""
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
__lowerCamelCase : Dict = """AAAB"""
__lowerCamelCase : str = """ABAAAAAB"""
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
__lowerCamelCase : str = """abcdabcy"""
__lowerCamelCase : Any = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
__lowerCamelCase : List[Any] = """Lü"""
__lowerCamelCase : Tuple = """Lüsai"""
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : str = """Lue"""
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 458
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( A,unittest.TestCase ):
'''simple docstring'''
a_ : Any = "ssube/stable-diffusion-x4-upscaler-onnx"
def _snake_case ( self : Any , _lowerCamelCase : List[str]=0 ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(_lowerCamelCase ) )
__lowerCamelCase : int = torch.manual_seed(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowerCamelCase : Tuple = self.get_dummy_inputs()
__lowerCamelCase : Dict = pipe(**_lowerCamelCase ).images
__lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : str = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase : str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowerCamelCase : Tuple = self.get_dummy_inputs()
__lowerCamelCase : Union[str, Any] = pipe(**_lowerCamelCase ).images
__lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : int = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowerCamelCase : List[str] = self.get_dummy_inputs()
__lowerCamelCase : List[str] = pipe(**_lowerCamelCase ).images
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : str = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowerCamelCase : Dict = self.get_dummy_inputs()
__lowerCamelCase : Dict = pipe(**_lowerCamelCase ).images
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : List[str] = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs()
__lowerCamelCase : int = pipe(**_lowerCamelCase ).images
__lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : Optional[int] = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def _snake_case ( self : str ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : Any = ort.SessionOptions()
__lowerCamelCase : str = False
return options
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowerCamelCase : Optional[Any] = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
__lowerCamelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowerCamelCase : List[Any] = """A fantasy landscape, trending on artstation"""
__lowerCamelCase : str = torch.manual_seed(0 )
__lowerCamelCase : Any = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_lowerCamelCase , output_type="""np""" , )
__lowerCamelCase : List[str] = output.images
__lowerCamelCase : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : int = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowerCamelCase : Union[str, Any] = init_image.resize((1_2_8, 1_2_8) )
__lowerCamelCase : str = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
__lowerCamelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowerCamelCase : str = """A fantasy landscape, trending on artstation"""
__lowerCamelCase : Tuple = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=_lowerCamelCase , output_type="""np""" , )
__lowerCamelCase : int = output.images
__lowerCamelCase : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : List[str] = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 458
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__magic_name__: List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__magic_name__: Optional[Any] = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__magic_name__: List[Any] = {
"unc-nlp/lxmert-base-uncased": 512,
}
__magic_name__: List[Any] = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = LxmertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__magic_name__ : int = getattr(lowerCAmelCase__ , normalizer_state.pop("""type""" ) )
__magic_name__ : Optional[int] = do_lower_case
__magic_name__ : str = strip_accents
__magic_name__ : Any = tokenize_chinese_chars
__magic_name__ : int = normalizer_class(**lowerCAmelCase__ )
__magic_name__ : Any = do_lower_case
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Dict:
__magic_name__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : str = [self.sep_token_id]
__magic_name__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
__magic_name__ : str = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 324
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = StableDiffusionPanoramaPipeline
lowercase__ : str = TEXT_TO_IMAGE_PARAMS
lowercase__ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
def __magic_name__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__magic_name__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__magic_name__ : Union[str, Any] = DDIMScheduler()
torch.manual_seed(0 )
__magic_name__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__magic_name__ : int = CLIPTextModel(lowerCAmelCase__ )
__magic_name__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> str:
__magic_name__ : Any = torch.manual_seed(lowerCAmelCase__ )
__magic_name__ : Tuple = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Union[str, Any] = self.get_dummy_components()
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Tuple = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Any = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> List[Any]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self ) -> str:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def __magic_name__ ( self ) -> str:
__magic_name__ : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Tuple = self.get_dummy_components()
__magic_name__ : Dict = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : int = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = """french fries"""
__magic_name__ : int = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__magic_name__ : Dict = output.images
__magic_name__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Dict = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> str:
__magic_name__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Any = self.get_dummy_components()
__magic_name__ : Tuple = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : List[Any] = sd_pipe(**lowerCAmelCase__ , view_batch_size=2 )
__magic_name__ : List[Any] = output.images
__magic_name__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : List[Any] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : str = self.get_dummy_components()
__magic_name__ : int = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" )
__magic_name__ : str = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : int = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Tuple = self.get_dummy_components()
__magic_name__ : int = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , skip_prk_steps=lowerCAmelCase__ )
__magic_name__ : List[Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
__magic_name__ : Dict = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : Dict = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : str = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self , lowerCAmelCase__=0 ) -> List[Any]:
__magic_name__ : Union[str, Any] = torch.manual_seed(lowerCAmelCase__ )
__magic_name__ : str = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : Optional[int] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[str] = self.get_inputs()
__magic_name__ : Tuple = pipe(**lowerCAmelCase__ ).images
__magic_name__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__magic_name__ : Tuple = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Dict = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[str] = self.get_inputs()
__magic_name__ : int = pipe(**lowerCAmelCase__ ).images
__magic_name__ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__magic_name__ : Any = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __magic_name__ ( self ) -> str:
__magic_name__ : List[str] = 0
def callback_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
__magic_name__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__magic_name__ : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__magic_name__ : int = latents[0, -3:, -3:, -1]
__magic_name__ : Optional[Any] = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__magic_name__ : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__magic_name__ : List[str] = latents[0, -3:, -3:, -1]
__magic_name__ : Dict = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__magic_name__ : List[Any] = False
__magic_name__ : Union[str, Any] = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : int = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : List[Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__ ( self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ : List[Any] = """stabilityai/stable-diffusion-2-base"""
__magic_name__ : List[str] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" )
__magic_name__ : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
__magic_name__ : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__ : List[Any] = self.get_inputs()
__magic_name__ : Union[str, Any] = pipe(**lowerCAmelCase__ )
__magic_name__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 324
| 1
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str | Literal[False]:
'''simple docstring'''
__lowerCAmelCase = list(UpperCamelCase__ )
__lowerCAmelCase = list(UpperCamelCase__ )
__lowerCAmelCase = 0
for i in range(len(UpperCamelCase__ ) ):
if lista[i] != lista[i]:
count += 1
__lowerCAmelCase = """_"""
if count > 1:
return False
else:
return "".join(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ ) -> list[str]:
'''simple docstring'''
__lowerCAmelCase = []
while True:
__lowerCAmelCase = ["""$"""] * len(UpperCamelCase__ )
__lowerCAmelCase = []
for i in range(len(UpperCamelCase__ ) ):
for j in range(i + 1 , len(UpperCamelCase__ ) ):
__lowerCAmelCase = compare_string(binary[i] , binary[j] )
if k is False:
__lowerCAmelCase = """*"""
__lowerCAmelCase = """*"""
temp.append("""X""" )
for i in range(len(UpperCamelCase__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(UpperCamelCase__ ) == 0:
return pi
__lowerCAmelCase = list(set(UpperCamelCase__ ) )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> list[str]:
'''simple docstring'''
__lowerCAmelCase = []
for minterm in minterms:
__lowerCAmelCase = """"""
for _ in range(UpperCamelCase__ ):
__lowerCAmelCase = str(minterm % 2 ) + string
minterm //= 2
temp.append(UpperCamelCase__ )
return temp
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
'''simple docstring'''
__lowerCAmelCase = list(UpperCamelCase__ )
__lowerCAmelCase = list(UpperCamelCase__ )
__lowerCAmelCase = 0
for i in range(len(UpperCamelCase__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> list[str]:
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = [0] * len(UpperCamelCase__ )
for i in range(len(chart[0] ) ):
__lowerCAmelCase = 0
__lowerCAmelCase = -1
for j in range(len(UpperCamelCase__ ) ):
if chart[j][i] == 1:
count += 1
__lowerCAmelCase = j
if count == 1:
__lowerCAmelCase = 1
for i in range(len(UpperCamelCase__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(UpperCamelCase__ ) ):
__lowerCAmelCase = 0
temp.append(prime_implicants[i] )
while True:
__lowerCAmelCase = 0
__lowerCAmelCase = -1
__lowerCAmelCase = 0
for i in range(len(UpperCamelCase__ ) ):
__lowerCAmelCase = chart[i].count(1 )
if count_n > max_n:
__lowerCAmelCase = count_n
__lowerCAmelCase = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(UpperCamelCase__ ) ):
__lowerCAmelCase = 0
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> list[list[int]]:
'''simple docstring'''
__lowerCAmelCase = [[0 for x in range(len(UpperCamelCase__ ) )] for x in range(len(UpperCamelCase__ ) )]
for i in range(len(UpperCamelCase__ ) ):
__lowerCAmelCase = prime_implicants[i].count("""_""" )
for j in range(len(UpperCamelCase__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , UpperCamelCase__ ):
__lowerCAmelCase = 1
return chart
def UpperCAmelCase ( ) -> None:
'''simple docstring'''
__lowerCAmelCase = int(input("""Enter the no. of variables\n""" ) )
__lowerCAmelCase = [
float(UpperCamelCase__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__lowerCAmelCase = decimal_to_binary(UpperCamelCase__ , UpperCamelCase__ )
__lowerCAmelCase = check(UpperCamelCase__ )
print("""Prime Implicants are:""" )
print(UpperCamelCase__ )
__lowerCAmelCase = prime_implicant_chart(UpperCamelCase__ , UpperCamelCase__ )
__lowerCAmelCase = selection(UpperCamelCase__ , UpperCamelCase__ )
print("""Essential Prime Implicants are:""" )
print(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 719
|
from __future__ import annotations
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> list[str]:
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
__lowerCAmelCase = number_of_bytes // partitions
__lowerCAmelCase = []
for i in range(UpperCamelCase__ ):
__lowerCAmelCase = i * bytes_per_partition + 1
__lowerCAmelCase = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 334
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=False , lowercase=True , lowercase="None" , lowercase=3 , lowercase=4 , lowercase=None , ):
A_ : Optional[int] = parent
A_ : int = batch_size
A_ : Union[str, Any] = seq_length
A_ : str = is_training
A_ : Dict = use_input_mask
A_ : Dict = use_token_type_ids
A_ : Dict = use_labels
A_ : List[str] = vocab_size
A_ : int = hidden_size
A_ : Tuple = num_hidden_layers
A_ : int = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Any = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Optional[int] = type_sequence_label_size
A_ : str = initializer_range
A_ : Union[str, Any] = num_labels
A_ : Union[str, Any] = num_choices
A_ : Dict = relative_attention
A_ : Tuple = position_biased_input
A_ : str = pos_att_type
A_ : Union[str, Any] = scope
def _a (self ):
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Optional[int] = None
if self.use_input_mask:
A_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A_ : List[str] = None
if self.use_token_type_ids:
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Optional[Any] = None
A_ : Union[str, Any] = None
A_ : Dict = None
if self.use_labels:
A_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
A_ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a (self ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _a (self ):
A_ : List[Any] = self.get_config()
A_ : Any = 300
return config
def _a (self , lowercase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Optional[Any] = DebertaModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )[0]
A_ : int = model(lowercase , token_type_ids=lowercase )[0]
A_ : str = model(lowercase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Any = DebertaForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Union[str, Any] = self.num_labels
A_ : Optional[int] = DebertaForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Optional[Any] = self.num_labels
A_ : List[str] = DebertaForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Tuple = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Union[str, Any] = DebertaForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
A_ : str = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a (self ):
A_ : Dict = self.prepare_config_and_inputs()
(
(
A_
), (
A_
), (
A_
), (
A_
), (
A_
), (
A_
), (
A_
),
) : Dict = config_and_inputs
A_ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : List[str] = False
def _a (self ):
A_ : Optional[Any] = DebertaModelTester(self )
A_ : int = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowercase )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase )
def _a (self ):
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase )
def _a (self ):
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowercase )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowercase )
@slow
def _a (self ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : str = DebertaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def _a (self ):
pass
@slow
def _a (self ):
A_ : Tuple = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
A_ : int = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
A_ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A_ : Union[str, Any] = model(lowercase , attention_mask=lowercase )[0]
# compare the actual values for a slice.
A_ : int = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 667
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
A_ : int = np.zeros((n + 1,) )
A_ : List[str] = ya
A_ : Any = xa
for k in range(lowerCamelCase__ ):
A_ : List[Any] = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] )
A_ : Optional[int] = y[k] + (
(step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667
| 1
|
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : dict[str, TrieNode] = {} # Mapping from char to TrieNode
_snake_case : List[Any] = False
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : List[Any] = self
for char in word:
if char not in curr.nodes:
_snake_case : List[Any] = TrieNode()
_snake_case : int = curr.nodes[char]
_snake_case : Union[str, Any] = True
def UpperCamelCase_ ( self : str , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Union[str, Any] = self
for char in word:
if char not in curr.nodes:
return False
_snake_case : Dict = curr.nodes[char]
return curr.is_leaf
def UpperCamelCase_ ( self : Dict , UpperCamelCase : str ):
'''simple docstring'''
def _delete(UpperCamelCase : TrieNode , UpperCamelCase : str , UpperCamelCase : int ) -> bool:
if index == len(UpperCamelCase ):
# If word does not exist
if not curr.is_leaf:
return False
_snake_case : Optional[Any] = False
return len(curr.nodes ) == 0
_snake_case : str = word[index]
_snake_case : str = curr.nodes.get(UpperCamelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_snake_case : int = _delete(UpperCamelCase , UpperCamelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , UpperCamelCase , 0 )
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Any )-> Tuple:
if node.is_leaf:
print(lowerCAmelCase , end=' ' )
for key, value in node.nodes.items():
print_words(lowerCAmelCase , word + key )
def lowerCamelCase_ ( )-> int:
_snake_case : int = """banana bananas bandana band apple all beast""".split()
_snake_case : int = TrieNode()
root.insert_many(lowerCAmelCase )
# print_words(root, "")
assert all(root.find(lowerCAmelCase ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[str] )-> List[str]:
print(str(lowerCAmelCase ) , 'works!' if passes else 'doesn\'t work :(' )
def lowerCamelCase_ ( )-> Any:
assert test_trie()
def lowerCamelCase_ ( )-> str:
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
| 714
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669
| 0
|
import math
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> float:
"""simple docstring"""
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCamelCase__ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 662
|
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : Any = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
A = 1
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
A = init[0]
A = init[1]
A = 0
A = g + heuristic[x][y] # cost from starting cell to destination cell
A = [[f, g, x, y]]
A = False # flag that is set when search is complete
A = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A = cell.pop()
A = next_cell[2]
A = next_cell[3]
A = next_cell[1]
if x == goal[0] and y == goal[1]:
A = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
A = x + DIRECTIONS[i][0]
A = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A = g + cost
A = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A = 1
A = i
A = []
A = goal[0]
A = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A = x - DIRECTIONS[action[x][y]][0]
A = y - DIRECTIONS[action[x][y]][1]
A = xa
A = ya
invpath.append([x, y] )
A = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCamelCase : Any = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase : Tuple = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase : Union[str, Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase : List[str] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase : Dict = 99
UpperCamelCase , UpperCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 690
| 0
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 709
|
import cva
import numpy as np
class UpperCamelCase__ :
def __init__( self : List[str] , UpperCamelCase__ : float , UpperCamelCase__ : int ):
'''simple docstring'''
if k in (0.04, 0.06):
lowercase_ = k
lowercase_ = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Optional[int] ):
'''simple docstring'''
return str(self.k )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = cva.imread(UpperCamelCase__ , 0 )
lowercase_ , lowercase_ = img.shape
lowercase_ = []
lowercase_ = img.copy()
lowercase_ = cva.cvtColor(UpperCamelCase__ , cva.COLOR_GRAY2RGB )
lowercase_ , lowercase_ = np.gradient(UpperCamelCase__ )
lowercase_ = dx**2
lowercase_ = dy**2
lowercase_ = dx * dy
lowercase_ = 0.04
lowercase_ = self.window_size // 2
for y in range(UpperCamelCase__ , h - offset ):
for x in range(UpperCamelCase__ , w - offset ):
lowercase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = (wxx * wyy) - (wxy**2)
lowercase_ = wxx + wyy
lowercase_ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
a = HarrisCorner(0.04, 3)
a , a = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 650
| 0
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _UpperCamelCase ( lowerCAmelCase_ = 3 ) ->qiskit.result.counts.Counts:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError("""number of qubits must be a integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(lowerCAmelCase_ ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 1_0:
raise ValueError("""number of qubits too large to simulate(>10).""" )
UpperCAmelCase = QuantumRegister(lowerCAmelCase_ , """qr""" )
UpperCAmelCase = ClassicalRegister(lowerCAmelCase_ , """cr""" )
UpperCAmelCase = QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = number_of_qubits
for i in range(lowerCAmelCase_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowerCAmelCase_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowerCAmelCase_ , lowerCAmelCase_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowerCAmelCase_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowerCAmelCase_ , lowerCAmelCase_ )
# simulate with 10000 shots
UpperCAmelCase = Aer.get_backend("""qasm_simulator""" )
UpperCAmelCase = execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_0_0_0_0 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 377
|
from __future__ import annotations
import math
def _UpperCamelCase ( lowerCAmelCase_ ) ->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__a = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def _UpperCamelCase ( lowerCAmelCase_ ) ->list[int]:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
UpperCAmelCase = []
for num in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase = 0
while 2 * i * i <= odd_composites[num]:
UpperCAmelCase = odd_composites[num] - 2 * i * i
if is_prime(lowerCAmelCase_ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowerCAmelCase_ ) == n:
return list_nums
return []
def _UpperCamelCase ( ) ->int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 377
| 1
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[int]="pt" ):
'''simple docstring'''
lowercase__ : Dict = {'add_prefix_space': True} if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not line.startswith(' ' ) else {}
lowercase__ : Any = padding_side
return tokenizer(
[line] , max_length=_lowerCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any=None , ):
'''simple docstring'''
lowercase__ : int = input_ids.ne(_lowerCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a , a , a="train" , a=None , a=None , a=None , a="" , ) -> List[Any]:
super().__init__()
lowercase__ : Dict = Path(a ).joinpath(type_path + '.source' )
lowercase__ : Union[str, Any] = Path(a ).joinpath(type_path + '.target' )
lowercase__ : Any = self.get_char_lens(self.src_file )
lowercase__ : Union[str, Any] = max_source_length
lowercase__ : Tuple = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
lowercase__ : Optional[int] = tokenizer
lowercase__ : Optional[Any] = prefix
if n_obs is not None:
lowercase__ : Union[str, Any] = self.src_lens[:n_obs]
lowercase__ : Dict = src_lang
lowercase__ : Optional[Any] = tgt_lang
def __len__( self ) -> Any:
return len(self.src_lens )
def __getitem__( self , a ) -> Dict[str, torch.Tensor]:
lowercase__ : Dict = index + 1 # linecache starts at 1
lowercase__ : List[Any] = self.prefix + linecache.getline(str(self.src_file ) , a ).rstrip('\n' )
lowercase__ : Optional[Any] = linecache.getline(str(self.tgt_file ) , a ).rstrip('\n' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase__ : str = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , a ) else self.tokenizer
)
lowercase__ : List[Any] = self.tokenizer.generator if isinstance(self.tokenizer , a ) else self.tokenizer
lowercase__ : Optional[Any] = encode_line(a , a , self.max_source_length , 'right' )
lowercase__ : Dict = encode_line(a , a , self.max_target_length , 'right' )
lowercase__ : str = source_inputs['input_ids'].squeeze()
lowercase__ : Any = target_inputs['input_ids'].squeeze()
lowercase__ : Optional[int] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _UpperCAmelCase ( a ) -> int:
return [len(a ) for x in Path(a ).open().readlines()]
def _UpperCAmelCase ( self , a ) -> Dict[str, torch.Tensor]:
lowercase__ : Optional[int] = torch.stack([x['input_ids'] for x in batch] )
lowercase__ : List[Any] = torch.stack([x['attention_mask'] for x in batch] )
lowercase__ : str = torch.stack([x['decoder_input_ids'] for x in batch] )
lowercase__ : Dict = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
lowercase__ : Union[str, Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
lowercase__ : int = trim_batch(a , a )
lowercase__ , lowercase__ : List[Any] = trim_batch(a , a , attention_mask=a )
lowercase__ : Optional[int] = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_UpperCamelCase : Optional[int] = getLogger(__name__)
def a_ ( _lowerCAmelCase : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : str = get_git_info()
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'git_log.json' ) )
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=4 , **_lowerCAmelCase : Any ):
'''simple docstring'''
with open(_lowerCAmelCase , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase , indent=_lowerCAmelCase , **_lowerCAmelCase )
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
with open(_lowerCAmelCase ) as f:
return json.load(_lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = git.Repo(search_parent_directories=_lowerCAmelCase )
lowercase__ : str = {
'repo_id': str(_lowerCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def a_ ( _lowerCAmelCase : Callable , _lowerCAmelCase : Iterable ):
'''simple docstring'''
return list(map(_lowerCAmelCase , _lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
'''simple docstring'''
with open(_lowerCAmelCase , 'wb' ) as f:
return pickle.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
def remove_articles(_lowerCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : Optional[int] ):
lowercase__ : Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : List[str] = normalize_answer(_lowerCAmelCase ).split()
lowercase__ : Dict = normalize_answer(_lowerCAmelCase ).split()
lowercase__ : Any = Counter(_lowerCAmelCase ) & Counter(_lowerCAmelCase )
lowercase__ : Optional[int] = sum(common.values() )
if num_same == 0:
return 0
lowercase__ : List[str] = 1.0 * num_same / len(_lowerCAmelCase )
lowercase__ : Optional[int] = 1.0 * num_same / len(_lowerCAmelCase )
lowercase__ : Optional[int] = (2 * precision * recall) / (precision + recall)
return fa
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
return normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase )
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
lowercase__ : Optional[int] = 0
for hypo, pred in zip(_lowerCAmelCase , _lowerCAmelCase ):
em += exact_match_score(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
em /= len(_lowerCAmelCase )
return {"em": em}
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Any = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase__ : Dict = 'dropout_rate'
for p in extra_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if not hasattr(_lowerCAmelCase , _lowerCAmelCase ) and not hasattr(_lowerCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowerCAmelCase ) )
delattr(_lowerCAmelCase , _lowerCAmelCase )
continue
lowercase__ : Union[str, Any] = p if hasattr(_lowerCAmelCase , _lowerCAmelCase ) else equivalent_param[p]
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
delattr(_lowerCAmelCase , _lowerCAmelCase )
return hparams, config
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase ={
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =["CLIPFeatureExtractor"]
UpperCAmelCase =["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =[
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =[
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =[
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 617
|
"""simple docstring"""
import qiskit
def _A ( _a : int , _a : int ):
"""simple docstring"""
A = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
A = qiskit.QuantumCircuit(_a , _a )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
A = qiskit.execute(_a , _a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_a )
if __name__ == "__main__":
UpperCAmelCase =single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 617
| 1
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class snake_case__:
"""simple docstring"""
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
return None
class snake_case__:
"""simple docstring"""
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ):
return None
class snake_case__(unittest.TestCase ):
"""simple docstring"""
lowercase_ = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case ( self : Optional[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE , "tf" , 12 , **SCREAMING_SNAKE_CASE )
@require_torch
@slow
def snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE , "pt" , 12 , **SCREAMING_SNAKE_CASE )
@require_torch
@slow
def snake_case ( self : List[str] ):
from transformers import BertModel
lowercase__ : int = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(SCREAMING_SNAKE_CASE ) )
vocab_file.flush()
lowercase__ : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE )
self._test_export(SCREAMING_SNAKE_CASE , "pt" , 12 , SCREAMING_SNAKE_CASE )
@require_tf
@slow
def snake_case ( self : Union[str, Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase__ : List[str] = self._test_export(SCREAMING_SNAKE_CASE , "tf" , 12 , **SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = quantize(Path(SCREAMING_SNAKE_CASE ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def snake_case ( self : Union[str, Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase__ : Tuple = self._test_export(SCREAMING_SNAKE_CASE , "pt" , 12 , **SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = quantize(SCREAMING_SNAKE_CASE )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any]=None , **SCREAMING_SNAKE_CASE : Any ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase__ : List[str] = Path(SCREAMING_SNAKE_CASE ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return path
except Exception as e:
self.fail(SCREAMING_SNAKE_CASE )
@require_torch
@require_tokenizers
@slow
def snake_case ( self : List[str] ):
from transformers import BertModel
lowercase__ : Tuple = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowercase__ : Optional[Any] = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "pt" )
@require_tf
@require_tokenizers
@slow
def snake_case ( self : Optional[Any] ):
from transformers import TFBertModel
lowercase__ : Tuple = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowercase__ : Optional[Any] = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "tf" )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = infer_shapes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def snake_case ( self : int ):
lowercase__ : Optional[int] = ["input_ids", "attention_mask", "token_type_ids"]
lowercase__ : List[str] = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
lowercase__ , lowercase__ : str = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE ) , set(SCREAMING_SNAKE_CASE ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase__ , lowercase__ : Optional[Any] = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def snake_case ( self : Any ):
lowercase__ : Dict = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 81
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Union[str, Any] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
lowercase__ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
| 81
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_ = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 597
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case_ = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
snake_case_ = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class a__ ( _lowercase ):
__magic_name__ : Any = VOCAB_FILES_NAMES
__magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Dict = ["input_ids", "attention_mask"]
__magic_name__ : Tuple = RobertaTokenizer
def __init__(self : Optional[Any], __UpperCAmelCase : Dict=None, __UpperCAmelCase : Tuple=None, __UpperCAmelCase : Tuple=None, __UpperCAmelCase : Any="replace", __UpperCAmelCase : Dict="<s>", __UpperCAmelCase : List[Any]="</s>", __UpperCAmelCase : Union[str, Any]="</s>", __UpperCAmelCase : int="<s>", __UpperCAmelCase : Optional[Any]="<unk>", __UpperCAmelCase : Tuple="<pad>", __UpperCAmelCase : Union[str, Any]="<mask>", __UpperCAmelCase : Any=False, __UpperCAmelCase : Optional[int]=True, **__UpperCAmelCase : Optional[Any], ) -> Tuple:
"""simple docstring"""
super().__init__(
__UpperCAmelCase, __UpperCAmelCase, tokenizer_file=__UpperCAmelCase, errors=__UpperCAmelCase, bos_token=__UpperCAmelCase, eos_token=__UpperCAmelCase, sep_token=__UpperCAmelCase, cls_token=__UpperCAmelCase, unk_token=__UpperCAmelCase, pad_token=__UpperCAmelCase, mask_token=__UpperCAmelCase, add_prefix_space=__UpperCAmelCase, trim_offsets=__UpperCAmelCase, **__UpperCAmelCase, )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''', __UpperCAmelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(__UpperCAmelCase, pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Dict = add_prefix_space
SCREAMING_SNAKE_CASE : Optional[int] = pre_tok_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
SCREAMING_SNAKE_CASE : Optional[int] = '''post_processor'''
SCREAMING_SNAKE_CASE : Any = getattr(self.backend_tokenizer, __UpperCAmelCase, __UpperCAmelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Tuple = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : str = False
if state.get('''add_prefix_space''', __UpperCAmelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : int = add_prefix_space
SCREAMING_SNAKE_CASE : List[str] = True
if state.get('''trim_offsets''', __UpperCAmelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : Any = trim_offsets
SCREAMING_SNAKE_CASE : Dict = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : Tuple = getattr(__UpperCAmelCase, state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**__UpperCAmelCase )
setattr(self.backend_tokenizer, __UpperCAmelCase, __UpperCAmelCase )
@property
def lowercase__ (self : int ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ (self : int, __UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AddedToken(__UpperCAmelCase, lstrip=__UpperCAmelCase, rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase, __UpperCAmelCase ) else value
SCREAMING_SNAKE_CASE : List[str] = value
def lowercase__ (self : Optional[int], *__UpperCAmelCase : List[Any], **__UpperCAmelCase : Optional[Any] ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = kwargs.get('''is_split_into_words''', __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase, **__UpperCAmelCase )
def lowercase__ (self : int, *__UpperCAmelCase : List[str], **__UpperCAmelCase : Tuple ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = kwargs.get('''is_split_into_words''', __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase, **__UpperCAmelCase )
def lowercase__ (self : Optional[Any], __UpperCAmelCase : str, __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(__UpperCAmelCase, name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def lowercase__ (self : Optional[int], __UpperCAmelCase : Dict, __UpperCAmelCase : List[str]=None ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ (self : Union[str, Any], __UpperCAmelCase : List[int], __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 507
| 0
|
def _lowerCAmelCase (_lowerCAmelCase):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase):
raise ValueError("Input must be an integer")
if input_num <= 0:
raise ValueError("Input must be positive")
return sum(
divisor for divisor in range(1 , input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 504
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase : Optional[Any] =pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"])
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
inspect_dataset(_lowerCAmelCase , _lowerCAmelCase)
UpperCamelCase_ = path + ".py"
assert script_name in os.listdir(_lowerCAmelCase)
assert "__pycache__" not in os.listdir(_lowerCAmelCase)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path" , ["accuracy"])
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
inspect_metric(_lowerCAmelCase , _lowerCAmelCase)
UpperCamelCase_ = path + ".py"
assert script_name in os.listdir(_lowerCAmelCase)
assert "__pycache__" not in os.listdir(_lowerCAmelCase)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
with pytest.raises(_lowerCAmelCase):
get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = get_dataset_config_names(_lowerCAmelCase)
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = get_dataset_infos(_lowerCAmelCase)
assert list(infos.keys()) == expected_configs
UpperCamelCase_ = expected_configs[0]
assert expected_config in infos
UpperCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = get_dataset_infos(_lowerCAmelCase)
assert expected_config in infos
UpperCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
with pytest.raises(_lowerCAmelCase):
get_dataset_split_names(_lowerCAmelCase , config_name=_lowerCAmelCase)
| 504
| 1
|
import requests
from bsa import BeautifulSoup
def snake_case_ ( lowerCAmelCase_ : str = "AAPL" ):
__lowercase : List[Any] = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
__lowercase : List[Any] = BeautifulSoup(requests.get(lowerCAmelCase_ ).text , """html.parser""" )
__lowercase : Optional[Any] = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 149
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : List[str] = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = ['''ConditionalDetrFeatureExtractor''']
lowerCamelCase : List[Any] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 149
| 1
|
UpperCamelCase__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _UpperCamelCase ():
"""simple docstring"""
UpperCamelCase__ = input("""Enter message: """ )
UpperCamelCase__ = input("""Enter key [alphanumeric]: """ )
UpperCamelCase__ = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
UpperCamelCase__ = """encrypt"""
UpperCamelCase__ = encrypt_message(a__ , a__ )
elif mode.lower().startswith("""d""" ):
UpperCamelCase__ = """decrypt"""
UpperCamelCase__ = decrypt_message(a__ , a__ )
print(f"""\n{mode.title()}ed message:""" )
print(a__ )
def _UpperCamelCase (a__ :str , a__ :str ):
"""simple docstring"""
return translate_message(a__ , a__ , """encrypt""" )
def _UpperCamelCase (a__ :str , a__ :str ):
"""simple docstring"""
return translate_message(a__ , a__ , """decrypt""" )
def _UpperCamelCase (a__ :str , a__ :str , a__ :str ):
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = key.upper()
for symbol in message:
UpperCamelCase__ = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(a__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(a__ ):
UpperCamelCase__ = 0
else:
translated.append(a__ )
return "".join(a__ )
if __name__ == "__main__":
main()
| 548
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Optional[Any] = """data2vec-audio"""
def __init__( self , __lowerCAmelCase=32 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="gelu" , __lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase=False , __lowerCAmelCase=16 , __lowerCAmelCase=19 , __lowerCAmelCase=5 , __lowerCAmelCase=0.05 , __lowerCAmelCase=10 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=10 , __lowerCAmelCase=0 , __lowerCAmelCase="sum" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=256 , __lowerCAmelCase=(512, 512, 512, 512, 1500) , __lowerCAmelCase=(5, 3, 3, 1, 1) , __lowerCAmelCase=(1, 2, 3, 1, 1) , __lowerCAmelCase=512 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=None , **__lowerCAmelCase , ):
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = feat_extract_activation
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = conv_bias
UpperCamelCase__ = num_conv_pos_embeddings
UpperCamelCase__ = num_conv_pos_embedding_groups
UpperCamelCase__ = conv_pos_kernel_size
UpperCamelCase__ = len(self.conv_dim )
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = feat_proj_dropout
UpperCamelCase__ = final_dropout
UpperCamelCase__ = layerdrop
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
UpperCamelCase__ = vocab_size
UpperCamelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ = mask_time_prob
UpperCamelCase__ = mask_time_length
UpperCamelCase__ = mask_time_min_masks
UpperCamelCase__ = mask_feature_prob
UpperCamelCase__ = mask_feature_length
UpperCamelCase__ = mask_feature_min_masks
# ctc loss
UpperCamelCase__ = ctc_loss_reduction
UpperCamelCase__ = ctc_zero_infinity
# adapter
UpperCamelCase__ = add_adapter
UpperCamelCase__ = adapter_kernel_size
UpperCamelCase__ = adapter_stride
UpperCamelCase__ = num_adapter_layers
UpperCamelCase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = xvector_output_dim
@property
def _lowerCamelCase ( self ):
return math.prod(self.conv_stride )
| 548
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCamelCase: List[Any] = TypeVar("""T""")
class __lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
_A = 42 # Cache store of keys
_A = 42 # References of the keys in cache
_A = 10 # Maximum capacity of cache
def __init__( self: Tuple, lowerCamelCase_: int ):
lowercase__ : Optional[int] = deque()
lowercase__ : Optional[Any] = set()
if not n:
lowercase__ : Tuple = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
lowercase__ : List[Any] = n
def snake_case__( self: Union[str, Any], lowerCamelCase_: T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowercase__ : Dict = self.dq_store.pop()
self.key_reference.remove(lowerCamelCase_ )
else:
self.dq_store.remove(lowerCamelCase_ )
self.dq_store.appendleft(lowerCamelCase_ )
self.key_reference.add(lowerCamelCase_ )
def snake_case__( self: Tuple ):
for k in self.dq_store:
print(lowerCamelCase_ )
def __repr__( self: Any ):
return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 266
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def SCREAMING_SNAKE_CASE__ ( _lowercase : dict ) -> tuple:
'''simple docstring'''
return (data["data"], data["target"])
def SCREAMING_SNAKE_CASE__ ( _lowercase : np.ndarray , _lowercase : np.ndarray , _lowercase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
lowercase__ : List[str] = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowercase , _lowercase )
# Predict target for test data
lowercase__ : Union[str, Any] = xgb.predict(_lowercase )
lowercase__ : Tuple = predictions.reshape(len(_lowercase ) , 1 )
return predictions
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
lowercase__ : List[str] = fetch_california_housing()
lowercase__ , lowercase__ : Optional[int] = data_handling(_lowercase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = train_test_split(
_lowercase , _lowercase , test_size=0.25 , random_state=1 )
lowercase__ : Any = xgboost(_lowercase , _lowercase , _lowercase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowercase , _lowercase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowercase , _lowercase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 266
| 1
|
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 0, 0, 0
SCREAMING_SNAKE_CASE__ = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ = ugly_nums[ia] * 5
for _ in range(1 , __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
ugly_nums.append(__UpperCamelCase )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
| 379
|
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __snake_case ( lowerCamelCase_ ):
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __a ( self : int ):
"""simple docstring"""
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __a ( self : Dict ):
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __a ( self : List[str] ):
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE__ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=_lowercase ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , _lowercase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferReader(__UpperCamelCase ) if isinstance(__UpperCamelCase , pa.Buffer ) else pa.memory_map(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = pa.ipc.open_stream(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__UpperCamelCase , features=__UpperCamelCase ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE__ = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ = pa.ipc.open_stream(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = f.read_all()
SCREAMING_SNAKE_CASE__ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__UpperCamelCase )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
with pytest.raises(__UpperCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
with pytest.raises(__UpperCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
SCREAMING_SNAKE_CASE__ = os.path.join(__UpperCamelCase , """test.arrow""" )
with ArrowWriter(path=__UpperCamelCase , schema=pa.schema(__UpperCamelCase ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(__UpperCamelCase , 1 )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if pa.types.is_list(__UpperCamelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if isinstance(lst[0] , __UpperCamelCase ):
change_first_primitive_element_in_list(lst[0] , __UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(__UpperCamelCase , optimized_int_type=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE__ = copy.deepcopy(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__UpperCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """mock://dataset-train.arrow"""
with ArrowWriter(path=__UpperCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__UpperCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ParquetWriter(stream=__UpperCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE__ = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ = pq.read_table(__UpperCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE__ = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__UpperCamelCase , format="""png""" )
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ParquetWriter(
stream=__UpperCamelCase , features=Features({"""image""": Image()} ) , embed_local_files=__UpperCamelCase ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE__ = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ = pq.read_table(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __UpperCamelCase )
with open(__UpperCamelCase , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__UpperCamelCase )] )
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ArrowWriter(stream=__UpperCamelCase ) as writer:
writer._build_writer(inferred_schema=__UpperCamelCase )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
| 379
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'speech_to_text'
_UpperCamelCase = ['past_key_values']
_UpperCamelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self ,_lowerCAmelCase=1_00_00 ,_lowerCAmelCase=12 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=4 ,_lowerCAmelCase=6 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=4 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase="relu" ,_lowerCAmelCase=2_56 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=2 ,_lowerCAmelCase=True ,_lowerCAmelCase=1 ,_lowerCAmelCase=0 ,_lowerCAmelCase=2 ,_lowerCAmelCase=60_00 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=2 ,_lowerCAmelCase=(5, 5) ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=80 ,_lowerCAmelCase=1 ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = d_model
lowerCamelCase__ = encoder_ffn_dim
lowerCamelCase__ = encoder_layers
lowerCamelCase__ = encoder_attention_heads
lowerCamelCase__ = decoder_ffn_dim
lowerCamelCase__ = decoder_layers
lowerCamelCase__ = decoder_attention_heads
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = activation_function
lowerCamelCase__ = init_std
lowerCamelCase__ = encoder_layerdrop
lowerCamelCase__ = decoder_layerdrop
lowerCamelCase__ = use_cache
lowerCamelCase__ = encoder_layers
lowerCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__ = max_source_positions
lowerCamelCase__ = max_target_positions
lowerCamelCase__ = num_conv_layers
lowerCamelCase__ = list(_lowerCAmelCase )
lowerCamelCase__ = conv_channels
lowerCamelCase__ = input_feat_per_channel
lowerCamelCase__ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,is_encoder_decoder=_lowerCAmelCase ,decoder_start_token_id=_lowerCAmelCase ,**_lowerCAmelCase ,)
| 50
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : List[str] , _A : int ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : List[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Dict = int(_A )
UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
| 75
| 0
|
from math import isclose, sqrt
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> tuple[float, float, float]:
snake_case__ = point_y / 4 / point_x
snake_case__ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
snake_case__ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
snake_case__ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
snake_case__ = outgoing_gradient**2 + 4
snake_case__ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
snake_case__ = (point_y - outgoing_gradient * point_x) ** 2 - 100
snake_case__ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
snake_case__ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
snake_case__ = x_minus if isclose(__lowerCAmelCase , __lowerCAmelCase ) else x_plus
snake_case__ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 1.4 , __lowerCAmelCase = -9.6 ) -> int:
snake_case__ = 0
snake_case__ = first_x_coord
snake_case__ = first_y_coord
snake_case__ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
snake_case__ , snake_case__ , snake_case__ = next_point(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 208
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase__ : Tuple = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
snake_case__ = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
snake_case__ = get_sagemaker_input()
else:
snake_case__ = get_cluster_input()
return config
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase=None ) -> int:
if subparsers is not None:
snake_case__ = subparsers.add_parser('''config''' , description=__lowerCAmelCase )
else:
snake_case__ = argparse.ArgumentParser('''Accelerate config command''' , description=__lowerCAmelCase )
parser.add_argument(
'''--config_file''' , default=__lowerCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=__lowerCAmelCase )
return parser
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
snake_case__ = get_user_input()
if args.config_file is not None:
snake_case__ = args.config_file
else:
if not os.path.isdir(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
snake_case__ = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(__lowerCAmelCase )
else:
config.to_yaml_file(__lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = config_command_parser()
snake_case__ = parser.parse_args()
config_command(__lowerCAmelCase )
if __name__ == "__main__":
main()
| 208
| 1
|
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [[float('''inf''' ) for _ in range(SCREAMING_SNAKE_CASE )] for _ in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowercase__ = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(SCREAMING_SNAKE_CASE ):
# looping through rows of graph array
for i in range(SCREAMING_SNAKE_CASE ):
# looping through columns of graph array
for j in range(SCREAMING_SNAKE_CASE ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowercase__ = dist[i][k] + dist[k][j]
_print_dist(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return dist, v
if __name__ == "__main__":
lowerCAmelCase = int(input('Enter number of vertices: '))
lowerCAmelCase = int(input('Enter number of edges: '))
lowerCAmelCase = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase = int(input('Enter source:'))
lowerCAmelCase = int(input('Enter destination:'))
lowerCAmelCase = float(input('Enter weight:'))
lowerCAmelCase = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 43
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''mt5'''
_lowercase : str = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
class _a ( UpperCamelCase__ ):
_lowercase : Tuple = '''mt5'''
_lowercase : Optional[Any] = MTaConfig
| 43
| 1
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCamelCase__ ( A__ ):
def __init__( self : List[str] , *__a : int , **__a : Dict ):
'''simple docstring'''
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
| 242
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCamelCase__ :
def __init__( self : str , __a : List[str] , __a : Union[str, Any]=None , __a : List[Any]=None , __a : Tuple=None , __a : Any="resnet50" , __a : List[str]=3 , __a : str=32 , __a : str=3 , __a : Tuple=True , __a : Dict=True , ):
'''simple docstring'''
lowerCamelCase__: int = parent
lowerCamelCase__: Tuple = out_indices if out_indices is not None else [4]
lowerCamelCase__: List[Any] = stage_names
lowerCamelCase__: int = out_features
lowerCamelCase__: Tuple = backbone
lowerCamelCase__: Tuple = batch_size
lowerCamelCase__: List[Any] = image_size
lowerCamelCase__: List[Any] = num_channels
lowerCamelCase__: Any = use_pretrained_backbone
lowerCamelCase__: Union[str, Any] = is_training
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__: List[str] = self.get_config()
return config, pixel_values
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCamelCase_ ( self : Any , __a : Optional[int] , __a : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: List[Any] = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase__: int = model(__a )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__: str = config_and_inputs
lowerCamelCase__: Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCamelCase__ ( A__ , A__ , A__ , unittest.TestCase ):
__lowerCamelCase = (TimmBackbone,) if is_torch_available() else ()
__lowerCamelCase = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: List[str] = TimmBackboneModelTester(self )
lowerCamelCase__: Tuple = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Union[str, Any] = """resnet18"""
lowerCamelCase__: List[str] = """microsoft/resnet-18"""
lowerCamelCase__: Dict = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
lowerCamelCase__: int = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCamelCase__: List[Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
lowerCamelCase__: Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Optional[Any] = model_class(__a )
lowerCamelCase__: Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase__: Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__: List[Any] = True
lowerCamelCase__: Any = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCamelCase__: Optional[int] = self.all_model_classes[0]
lowerCamelCase__: Tuple = model_class(__a )
model.to(__a )
lowerCamelCase__: Any = self._prepare_for_class(__a , __a )
lowerCamelCase__: int = model(**__a )
lowerCamelCase__: List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCamelCase__: str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCamelCase__: Tuple = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Dict = model_class(__a )
model.to(__a )
model.eval()
lowerCamelCase__: Dict = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCamelCase__: Optional[Any] = copy.deepcopy(__a )
lowerCamelCase__: str = None
lowerCamelCase__: int = model_class(__a )
model.to(__a )
model.eval()
lowerCamelCase__: Union[str, Any] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCamelCase__: Optional[int] = copy.deepcopy(__a )
lowerCamelCase__: str = False
lowerCamelCase__: List[Any] = model_class(__a )
model.to(__a )
model.eval()
lowerCamelCase__: Dict = model(**__a )
| 242
| 1
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
A__ : Union[str, Any] =logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
return max(metric_fn(lowerCAmelCase , lowerCAmelCase ) for gt in ground_truths )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = [line.strip() for line in open(lowerCAmelCase , """r""" ).readlines()]
_lowerCAmelCase = []
if args.gold_data_mode == "qa":
_lowerCAmelCase = pd.read_csv(lowerCAmelCase , sep="""\t""" , header=lowerCAmelCase )
for answer_list in data[1]:
_lowerCAmelCase = ast.literal_eval(lowerCAmelCase )
answers.append(lowerCAmelCase )
else:
_lowerCAmelCase = [line.strip() for line in open(lowerCAmelCase , """r""" ).readlines()]
_lowerCAmelCase = [[reference] for reference in references]
_lowerCAmelCase = _lowerCAmelCase = _lowerCAmelCase = 0
for prediction, ground_truths in zip(lowerCAmelCase , lowerCAmelCase ):
total += 1
em += metric_max_over_ground_truths(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
fa += metric_max_over_ground_truths(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
_lowerCAmelCase = 100.0 * em / total
_lowerCAmelCase = 100.0 * fa / total
logger.info(f"F1: {fa:.2f}" )
logger.info(f"EM: {em:.2f}" )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = args.k
_lowerCAmelCase = [line.strip() for line in open(lowerCAmelCase , """r""" ).readlines()]
_lowerCAmelCase = [line.strip() for line in open(lowerCAmelCase , """r""" ).readlines()]
_lowerCAmelCase = _lowerCAmelCase = 0
for hypo, reference in zip(lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = set(hypo.split("""\t""" )[:k] )
_lowerCAmelCase = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_lowerCAmelCase = 100.0 * em / total
logger.info(f"Precision@{k}: {em: .2f}" )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
def strip_title(lowerCAmelCase ):
if title.startswith("""\"""" ):
_lowerCAmelCase = title[1:]
if title.endswith("""\"""" ):
_lowerCAmelCase = title[:-1]
return title
_lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCAmelCase , return_tensors="""pt""" , padding=lowerCAmelCase , truncation=lowerCAmelCase , )["""input_ids"""].to(args.device )
_lowerCAmelCase = rag_model.rag.question_encoder(lowerCAmelCase )
_lowerCAmelCase = question_enc_outputs[0]
_lowerCAmelCase = rag_model.retriever(
lowerCAmelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
_lowerCAmelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_lowerCAmelCase = []
for docs in all_docs:
_lowerCAmelCase = [strip_title(lowerCAmelCase ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(lowerCAmelCase ) )
return provenance_strings
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
with torch.no_grad():
_lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCAmelCase , return_tensors="""pt""" , padding=lowerCAmelCase , truncation=lowerCAmelCase )
_lowerCAmelCase = inputs_dict.input_ids.to(args.device )
_lowerCAmelCase = inputs_dict.attention_mask.to(args.device )
_lowerCAmelCase = rag_model.generate( # rag_model overwrites generate
lowerCAmelCase , attention_mask=lowerCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_lowerCAmelCase = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
if args.print_predictions:
for q, a in zip(lowerCAmelCase , lowerCAmelCase ):
logger.info("""Q: {} - A: {}""".format(lowerCAmelCase , lowerCAmelCase ) )
return answers
def UpperCamelCase__ ( ):
"""simple docstring"""
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCAmelCase , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=lowerCAmelCase , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCAmelCase , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=lowerCAmelCase , type=lowerCAmelCase , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=lowerCAmelCase , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCAmelCase , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=lowerCAmelCase , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=lowerCAmelCase , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=lowerCAmelCase , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=lowerCAmelCase , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=lowerCAmelCase , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=lowerCAmelCase , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=lowerCAmelCase , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = {}
if args.model_type is None:
_lowerCAmelCase = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
_lowerCAmelCase = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
_lowerCAmelCase = args.n_docs
if args.index_name is not None:
_lowerCAmelCase = args.index_name
if args.index_path is not None:
_lowerCAmelCase = args.index_path
else:
_lowerCAmelCase = BartForConditionalGeneration
_lowerCAmelCase = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , lowerCAmelCase )
_lowerCAmelCase = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
_lowerCAmelCase = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(lowerCAmelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(lowerCAmelCase ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
_lowerCAmelCase = RagRetriever.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
_lowerCAmelCase = model_class.from_pretrained(lowerCAmelCase , retriever=lowerCAmelCase , **lowerCAmelCase )
model.retriever.init_retrieval()
else:
_lowerCAmelCase = model_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
_lowerCAmelCase = []
for line in tqdm(lowerCAmelCase ):
questions.append(line.strip() )
if len(lowerCAmelCase ) == args.eval_batch_size:
_lowerCAmelCase = evaluate_batch_fn(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
preds_file.write("""\n""".join(lowerCAmelCase ) + """\n""" )
preds_file.flush()
_lowerCAmelCase = []
if len(lowerCAmelCase ) > 0:
_lowerCAmelCase = evaluate_batch_fn(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
preds_file.write("""\n""".join(lowerCAmelCase ) )
preds_file.flush()
score_fn(lowerCAmelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
A__ : Tuple =get_args()
main(args)
| 207
|
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCAmelCase ( snake_case_ ):
def __init__( self : Union[str, Any] , __snake_case : Callable , __snake_case : Optional[Features] = None , __snake_case : str = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[dict] = None , __snake_case : Optional[int] = None , **__snake_case : Optional[Any] , ) -> Union[str, Any]:
super().__init__(
features=__snake_case , cache_dir=__snake_case , keep_in_memory=__snake_case , streaming=__snake_case , num_proc=__snake_case , **__snake_case , )
_lowerCAmelCase = Generator(
cache_dir=__snake_case , features=__snake_case , generator=__snake_case , gen_kwargs=__snake_case , **__snake_case , )
def lowercase__ ( self : str ) -> Dict:
# Build iterable dataset
if self.streaming:
_lowerCAmelCase = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
self.builder.download_and_prepare(
download_config=__snake_case , download_mode=__snake_case , verification_mode=__snake_case , base_path=__snake_case , num_proc=self.num_proc , )
_lowerCAmelCase = self.builder.as_dataset(
split="""train""" , verification_mode=__snake_case , in_memory=self.keep_in_memory )
return dataset
| 207
| 1
|
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'EncodecFeatureExtractor'
UpperCamelCase__ = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self , snake_case_ , snake_case_ ):
super().__init__(snake_case_ , snake_case_ )
lowercase =self.feature_extractor
lowercase =False
def _A( self , snake_case_=None , snake_case_=None , snake_case_=True ):
return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case_ , **snake_case_ )
lowercase =kwargs.pop('''audio''' , snake_case_ )
lowercase =kwargs.pop('''sampling_rate''' , snake_case_ )
lowercase =kwargs.pop('''text''' , snake_case_ )
if len(snake_case_ ) > 0:
lowercase =args[0]
lowercase =args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
lowercase =self.tokenizer(snake_case_ , **snake_case_ )
if audio is not None:
lowercase =self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowercase =audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
lowercase =audio_inputs['''padding_mask''']
return inputs
def _A( self , *snake_case_ , **snake_case_ ):
lowercase =kwargs.pop('''audio''' , snake_case_ )
lowercase =kwargs.pop('''padding_mask''' , snake_case_ )
if len(snake_case_ ) > 0:
lowercase =args[0]
lowercase =args[1:]
if audio_values is not None:
return self._decode_audio(snake_case_ , padding_mask=snake_case_ )
else:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _A( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ = None ):
lowercase =to_numpy(snake_case_ )
lowercase , lowercase , lowercase =audio_values.shape
if padding_mask is None:
return list(snake_case_ )
lowercase =to_numpy(snake_case_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowercase =seq_len - padding_mask.shape[-1]
lowercase =1 - self.feature_extractor.padding_value
lowercase =np.pad(snake_case_ , ((0, 0), (0, difference)) , '''constant''' , constant_values=snake_case_ )
lowercase =audio_values.tolist()
for i in range(snake_case_ ):
lowercase =np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowercase =sliced_audio.reshape(snake_case_ , -1 )
return audio_values
| 145
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
def UpperCamelCase ( lowercase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase =torch.load(lowercase_ , map_location='''cpu''' )
if "model" in sd.keys():
lowercase =torch.load(lowercase_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
lowercase =[
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase_ )
lowercase ={
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
lowercase =sd.pop(lowercase_ )
lowercase =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
lowercase =sd[key]
# We split QKV in separate Q,K,V
lowercase =key.replace('''.qkv_proj.''' , '''.q_proj.''' )
lowercase =key.replace('''.qkv_proj.''' , '''.k_proj.''' )
lowercase =key.replace('''.qkv_proj.''' , '''.v_proj.''' )
lowercase =value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
lowercase , lowercase , lowercase =torch.split(lowercase_ , depth // 3 , dim=0 )
lowercase =q
lowercase =k
lowercase =v
del sd[key]
return sd
@torch.no_grad()
def UpperCamelCase ( lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=None ) -> Optional[int]:
'''simple docstring'''
lowercase =load_checkpoint(lowercase_ )
if config is not None:
lowercase =OPTConfig.from_pretrained(lowercase_ )
else:
lowercase =OPTConfig()
lowercase =OPTModel(lowercase_ ).half().eval()
model.load_state_dict(lowercase_ )
# Check results
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_UpperCAmelCase : List[str] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 145
| 1
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
a_ : str = logging.getLogger(__name__)
a_ : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
a_ : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _snake_case :
_lowercase : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A__ )} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_lowercase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_lowercase : bool = field(
default=A__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class _snake_case :
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
_lowercase : Optional[str] = field(default=A__ , metadata={'''help''': '''The input training data file (a text file).'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_lowercase : Optional[int] = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
_lowercase : Optional[int] = field(
default=A__ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
_lowercase : Optional[int] = field(
default=A__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_lowercase : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_lowercase : bool = field(
default=A__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
if self.train_file is not None:
SCREAMING_SNAKE_CASE = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
SCREAMING_SNAKE_CASE = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
with open(_UpperCAmelCase , 'r' , encoding='utf-8') as f:
SCREAMING_SNAKE_CASE = [json.loads(_UpperCAmelCase) for line in f.read().splitlines() if (len(_UpperCAmelCase) > 0 and not line.isspace())]
assert len(_UpperCAmelCase) == len(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = {c: dataset[c] for c in dataset.column_names}
SCREAMING_SNAKE_CASE = refs
return Dataset.from_dict(_UpperCAmelCase)
def lowerCamelCase__ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.')
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''')
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
if "validation" not in datasets.keys():
SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
SCREAMING_SNAKE_CASE = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE = data_args.validation_file
SCREAMING_SNAKE_CASE = data_args.train_file.split('.')[-1]
if extension == "txt":
SCREAMING_SNAKE_CASE = 'text'
SCREAMING_SNAKE_CASE = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.')
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''')
config.update_from_string(model_args.config_overrides)
logger.info(F'''New config: {config}''')
SCREAMING_SNAKE_CASE = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.')
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch')
SCREAMING_SNAKE_CASE = AutoModelForMaskedLM.from_config(_UpperCAmelCase)
model.resize_token_embeddings(len(_UpperCAmelCase))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
SCREAMING_SNAKE_CASE = datasets['train'].column_names
else:
SCREAMING_SNAKE_CASE = datasets['validation'].column_names
SCREAMING_SNAKE_CASE = 'text' if 'text' in column_names else column_names[0]
SCREAMING_SNAKE_CASE = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase):
# Remove empty lines
SCREAMING_SNAKE_CASE = [line for line in examples['text'] if len(_UpperCAmelCase) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length)
SCREAMING_SNAKE_CASE = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
SCREAMING_SNAKE_CASE = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file)
if data_args.validation_ref_file is not None:
SCREAMING_SNAKE_CASE = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file)
# If we have ref files, need to avoid it removed by trainer
SCREAMING_SNAKE_CASE = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
SCREAMING_SNAKE_CASE = False
# Data collator
# This one will take care of randomly masking the tokens.
SCREAMING_SNAKE_CASE = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
SCREAMING_SNAKE_CASE = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
SCREAMING_SNAKE_CASE = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
SCREAMING_SNAKE_CASE = model_args.model_name_or_path
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=_UpperCAmelCase)
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , 'train_results.txt')
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w') as writer:
logger.info('***** Train results *****')
for key, value in sorted(train_result.metrics.items()):
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json'))
# Evaluation
SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info('*** Evaluate ***')
SCREAMING_SNAKE_CASE = trainer.evaluate()
SCREAMING_SNAKE_CASE = math.exp(eval_output['eval_loss'])
SCREAMING_SNAKE_CASE = perplexity
SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt')
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w') as writer:
logger.info('***** Eval results *****')
for key, value in sorted(results.items()):
logger.info(F''' {key} = {value}''')
writer.write(F'''{key} = {value}\n''')
return results
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 73
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase__ (_UpperCAmelCase):
return 1.0 / (1.0 + np.exp(-_outputs))
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_UpperCAmelCase)
class _snake_case ( A__ ):
_lowercase : Tuple = '''sigmoid'''
_lowercase : List[str] = '''softmax'''
_lowercase : Tuple = '''none'''
@add_end_docstrings(
A__ , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = False
_lowercase : Tuple = ClassificationFunction.NONE
def __init__( self , **a) -> Optional[Any]:
super().__init__(**a)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE = tokenizer_kwargs
SCREAMING_SNAKE_CASE = {}
if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None:
SCREAMING_SNAKE_CASE = self.model.config.return_all_scores
if isinstance(a , a) or top_k is None:
SCREAMING_SNAKE_CASE = top_k
SCREAMING_SNAKE_CASE = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , )
if return_all_scores:
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = 1
if isinstance(a , a):
SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *a , **a) -> Optional[int]:
SCREAMING_SNAKE_CASE = super().__call__(*a , **a)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE = 'top_k' not in kwargs
if isinstance(args[0] , a) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> Dict[str, GenericTensor]:
SCREAMING_SNAKE_CASE = self.framework
if isinstance(a , a):
return self.tokenizer(**a , return_tensors=a , **a)
elif isinstance(a , a) and len(a) == 1 and isinstance(inputs[0] , a) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a)
elif isinstance(a , a):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
return self.tokenizer(a , return_tensors=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
return self.model(**a)
def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1 , a=True) -> Any:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None:
SCREAMING_SNAKE_CASE = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE = model_outputs['logits'][0]
SCREAMING_SNAKE_CASE = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE = sigmoid(a)
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE = softmax(a)
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a)
]
if not _legacy:
dict_scores.sort(key=lambda a: x["score"] , reverse=a)
if top_k is not None:
SCREAMING_SNAKE_CASE = dict_scores[:top_k]
return dict_scores
| 73
| 1
|
import os
import time
import numpy as np
import onnxruntime as ort
__snake_case : Any = "1"
__snake_case : int = "0"
__snake_case : Any = "1"
__snake_case : List[str] = ort.SessionOptions()
__snake_case : Union[str, Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
__snake_case : Optional[Any] = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
__snake_case : Any = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
__snake_case : Tuple = ort.RunOptions()
__snake_case : Optional[int] = 128
__snake_case : Optional[Any] = 1
__snake_case : int = np.ones((batch, sequence), dtype=np.intaa)
__snake_case : str = np.ones((batch, sequence), dtype=np.intaa)
__snake_case : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
__snake_case : Tuple = time.time()
__snake_case : int = 2_000
__snake_case : List[str] = {}
for iter in range(max_iters):
__snake_case : Optional[int] = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1_000 / max_iters))
| 720
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class UpperCAmelCase ( __snake_case ):
lowercase = """markuplm"""
def __init__( self : Optional[Any] , __magic_name__ : List[Any]=3_0_5_2_2 , __magic_name__ : int=7_6_8 , __magic_name__ : List[Any]=1_2 , __magic_name__ : List[Any]=1_2 , __magic_name__ : str=3_0_7_2 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=5_1_2 , __magic_name__ : List[str]=2 , __magic_name__ : Dict=0.02 , __magic_name__ : List[str]=1e-12 , __magic_name__ : str=0 , __magic_name__ : List[str]=0 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Dict=2_5_6 , __magic_name__ : Tuple=1_0_2_4 , __magic_name__ : Any=2_1_6 , __magic_name__ : str=1_0_0_1 , __magic_name__ : Dict=3_2 , __magic_name__ : Optional[int]=5_0 , __magic_name__ : List[Any]="absolute" , __magic_name__ : Any=True , __magic_name__ : Optional[Any]=None , **__magic_name__ : List[str] , ):
"""simple docstring"""
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ , )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
# additional properties
UpperCamelCase = max_depth
UpperCamelCase = max_xpath_tag_unit_embeddings
UpperCamelCase = max_xpath_subs_unit_embeddings
UpperCamelCase = tag_pad_id
UpperCamelCase = subs_pad_id
UpperCamelCase = xpath_unit_hidden_size
| 181
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def UpperCamelCase_( _A :Tuple , _A :str , _A :int )-> Optional[Any]:
UpperCamelCase__ = os.path.abspath(_A )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
UpperCamelCase__ = tf.train.list_variables(_A )
UpperCamelCase__ = []
UpperCamelCase__ = []
UpperCamelCase__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
UpperCamelCase__ = full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
UpperCamelCase__ = name[1:]
# figure out how many levels deep the name is
UpperCamelCase__ = 0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(_A )
# read data
UpperCamelCase__ = tf.train.load_variable(_A , _A )
names.append("/".join(_A ) )
arrays.append(_A )
logger.info(F'''Read a total of {len(_A ):,} layers''' )
# Sanity check
if len(set(_A ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(_A ) )})''' )
UpperCamelCase__ = list(set(_A ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(_A , _A ):
UpperCamelCase__ = full_name.split("/" )
UpperCamelCase__ = model
UpperCamelCase__ = []
for i, m_name in enumerate(_A ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
UpperCamelCase__ = int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
UpperCamelCase__ = getattr(_A , "embeddings" )
UpperCamelCase__ = getattr(_A , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
UpperCamelCase__ = getattr(_A , "encoder" )
UpperCamelCase__ = getattr(_A , "layer" )
UpperCamelCase__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
UpperCamelCase__ = getattr(_A , "pooler" )
UpperCamelCase__ = getattr(_A , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
UpperCamelCase__ = getattr(_A , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
UpperCamelCase__ = getattr(_A , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
UpperCamelCase__ = getattr(_A , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
UpperCamelCase__ = getattr(_A , "token_type_embeddings" )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append("weight" )
UpperCamelCase__ = getattr(_A , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
UpperCamelCase__ = getattr(_A , "attention" )
UpperCamelCase__ = getattr(_A , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
UpperCamelCase__ = getattr(_A , "attention" )
UpperCamelCase__ = getattr(_A , "output" )
UpperCamelCase__ = getattr(_A , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
UpperCamelCase__ = getattr(_A , "attention" )
UpperCamelCase__ = getattr(_A , "output" )
UpperCamelCase__ = getattr(_A , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
UpperCamelCase__ = getattr(_A , "output" )
UpperCamelCase__ = getattr(_A , "dense" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["output", "LayerNorm"] )
UpperCamelCase__ = getattr(_A , "output" )
UpperCamelCase__ = getattr(_A , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
UpperCamelCase__ = getattr(_A , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
UpperCamelCase__ = getattr(_A , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
UpperCamelCase__ = getattr(_A , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
UpperCamelCase__ = getattr(_A , "intermediate" )
UpperCamelCase__ = getattr(_A , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("output" )
UpperCamelCase__ = getattr(_A , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
UpperCamelCase__ = getattr(_A , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
UpperCamelCase__ = getattr(_A , "weight" )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
UpperCamelCase__ = ".".join(_A )
if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , _A ) or re.match(
r"(\S+)\.attention\.output\.dense\.weight" , _A ):
UpperCamelCase__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
UpperCamelCase__ = array.transpose()
if pointer.shape == array.shape:
UpperCamelCase__ = torch.from_numpy(_A )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def UpperCamelCase_( _A :Union[str, Any] , _A :Dict , _A :Dict )-> Dict:
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
UpperCamelCase__ = BertConfig.from_json_file(_A )
UpperCamelCase__ = BertModel(_A )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(_A , _A , _A )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
__UpperCamelCase = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 551
|
from __future__ import annotations
import math
import random
from typing import Any
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = 0
def snake_case__ ( self ):
'''simple docstring'''
return self.head == self.tail
def snake_case__ ( self , snake_case ):
'''simple docstring'''
self.data.append(snake_case )
UpperCamelCase__ = self.tail + 1
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.data[self.head]
UpperCamelCase__ = self.head + 1
return ret
def snake_case__ ( self ):
'''simple docstring'''
return self.tail - self.head
def snake_case__ ( self ):
'''simple docstring'''
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = data
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = 1
def snake_case__ ( self ):
'''simple docstring'''
return self.data
def snake_case__ ( self ):
'''simple docstring'''
return self.left
def snake_case__ ( self ):
'''simple docstring'''
return self.right
def snake_case__ ( self ):
'''simple docstring'''
return self.height
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = data
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = node
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = node
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = height
def UpperCamelCase_( _A :MyNode | None )-> int:
if node is None:
return 0
return node.get_height()
def UpperCamelCase_( _A :int , _A :int )-> int:
if a > b:
return a
return b
def UpperCamelCase_( _A :MyNode )-> MyNode:
print("left rotation node:" , node.get_data() )
UpperCamelCase__ = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(_A )
UpperCamelCase__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_A )
UpperCamelCase__ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_A )
return ret
def UpperCamelCase_( _A :MyNode )-> MyNode:
print("right rotation node:" , node.get_data() )
UpperCamelCase__ = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(_A )
UpperCamelCase__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_A )
UpperCamelCase__ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_A )
return ret
def UpperCamelCase_( _A :MyNode )-> MyNode:
UpperCamelCase__ = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_A ) )
return right_rotation(_A )
def UpperCamelCase_( _A :MyNode )-> MyNode:
UpperCamelCase__ = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_A ) )
return left_rotation(_A )
def UpperCamelCase_( _A :MyNode | None , _A :Any )-> MyNode | None:
if node is None:
return MyNode(_A )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , _A ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
UpperCamelCase__ = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
UpperCamelCase__ = right_rotation(_A )
else:
UpperCamelCase__ = lr_rotation(_A )
else:
node.set_right(insert_node(node.get_right() , _A ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
UpperCamelCase__ = node.get_right()
assert right_child is not None
if data < right_child.get_data():
UpperCamelCase__ = rl_rotation(_A )
else:
UpperCamelCase__ = left_rotation(_A )
UpperCamelCase__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_A )
return node
def UpperCamelCase_( _A :MyNode )-> Any:
while True:
UpperCamelCase__ = root.get_right()
if right_child is None:
break
UpperCamelCase__ = right_child
return root.get_data()
def UpperCamelCase_( _A :MyNode )-> Any:
while True:
UpperCamelCase__ = root.get_left()
if left_child is None:
break
UpperCamelCase__ = left_child
return root.get_data()
def UpperCamelCase_( _A :MyNode , _A :Any )-> MyNode | None:
UpperCamelCase__ = root.get_left()
UpperCamelCase__ = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
UpperCamelCase__ = get_left_most(_A )
root.set_data(_A )
root.set_right(del_node(_A , _A ) )
elif left_child is not None:
UpperCamelCase__ = left_child
elif right_child is not None:
UpperCamelCase__ = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data" )
return root
else:
root.set_left(del_node(_A , _A ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(_A , _A ) )
if get_height(_A ) - get_height(_A ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
UpperCamelCase__ = left_rotation(_A )
else:
UpperCamelCase__ = rl_rotation(_A )
elif get_height(_A ) - get_height(_A ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
UpperCamelCase__ = right_rotation(_A )
else:
UpperCamelCase__ = lr_rotation(_A )
UpperCamelCase__ = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(_A )
return root
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
UpperCamelCase__ = None
def snake_case__ ( self ):
'''simple docstring'''
return get_height(self.root )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
print("insert:" + str(snake_case ) )
UpperCamelCase__ = insert_node(self.root , snake_case )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
print("delete:" + str(snake_case ) )
if self.root is None:
print("Tree is empty!" )
return
UpperCamelCase__ = del_node(self.root , snake_case )
def __str__( self , ): # a level traversale, gives a more intuitive look on the tree
'''simple docstring'''
UpperCamelCase__ = ""
UpperCamelCase__ = MyQueue()
q.push(self.root )
UpperCamelCase__ = self.get_height()
if layer == 0:
return output
UpperCamelCase__ = 0
while not q.is_empty():
UpperCamelCase__ = q.pop()
UpperCamelCase__ = " " * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(snake_case )
q.push(snake_case )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
UpperCamelCase__ = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , snake_case ) - 1:
UpperCamelCase__ = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def UpperCamelCase_( )-> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
__UpperCamelCase = AVLtree()
__UpperCamelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 551
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCAmelCase ( self ) -> int:
a__ = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
a__ = {
'''input_ids''': tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
a__ = model(SCREAMING_SNAKE_CASE )['''last_hidden_state''']
a__ = tf.TensorShape((1, 6, 7_6_8) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
a__ = tf.convert_to_tensor(
[
[
[0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
[-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
[-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[Any] = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
a_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 148
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
_a = 1.054571817E-34 # unit of ℏ : J * s
_a = 3E8 # unit of c : m * s^-1
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> dict[str, float]:
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if force < 0:
raise ValueError('''Magnitude of force can not be negative''' )
if distance < 0:
raise ValueError('''Distance can not be negative''' )
if area < 0:
raise ValueError('''Area can not be negative''' )
if force == 0:
_UpperCamelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
2_40 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_UpperCamelCase = (2_40 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_UpperCamelCase = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('''One and only one argument must be 0''' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> int:
UpperCamelCase :Union[str, Any] = parent
UpperCamelCase :Tuple = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :Any = patch_size
UpperCamelCase :List[str] = num_channels
UpperCamelCase :int = is_training
UpperCamelCase :str = use_labels
UpperCamelCase :Optional[Any] = hidden_size
UpperCamelCase :int = num_hidden_layers
UpperCamelCase :List[Any] = backbone_out_indices
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Tuple = intermediate_size
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :List[str] = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :int = backbone_featmap_shape
UpperCamelCase :Any = scope
UpperCamelCase :int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :Dict = (image_size // patch_size) ** 2
UpperCamelCase :List[str] = num_patches + 1
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase :Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Any = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :List[str] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase :Optional[Any] = self.num_labels
UpperCamelCase :Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :Optional[int] = self.num_labels
UpperCamelCase :int = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Tuple =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Tuple =False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Union[str, Any] = DPTModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :int = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
UpperCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Any = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = False
UpperCamelCase :List[Any] = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Any:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :Any = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Union[str, Any] = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase :List[str] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = prepare_img()
UpperCamelCase :List[str] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :int = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase :int = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__A =logging.get_logger(__name__)
__A ={
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = "van"
def __init__( self , lowercase=224 , lowercase=3 , lowercase=[7, 3, 3, 3] , lowercase=[4, 2, 2, 2] , lowercase=[64, 128, 320, 512] , lowercase=[3, 3, 12, 3] , lowercase=[8, 8, 4, 4] , lowercase="gelu" , lowercase=0.0_2 , lowercase=1e-6 , lowercase=1e-2 , lowercase=0.0 , lowercase=0.0 , **lowercase , ) -> List[Any]:
super().__init__(**UpperCamelCase__ )
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = patch_sizes
lowerCamelCase_ = strides
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = mlp_ratios
lowerCamelCase_ = hidden_act
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = layer_scale_init_value
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = dropout_rate
| 700
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'vit'
def __init__( self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0_2 , lowercase=1e-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=True , lowercase=16 , **lowercase , ) -> int:
super().__init__(**lowercase )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = encoder_stride
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE_( self ) -> float:
return 1e-4
| 313
| 0
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
SCREAMING_SNAKE_CASE :Tuple = logging.getLogger(__name__)
def lowerCAmelCase( )-> List[str]:
"""simple docstring"""
UpperCamelCase_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=SCREAMING_SNAKE_CASE_ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=SCREAMING_SNAKE_CASE_ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=SCREAMING_SNAKE_CASE_ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=SCREAMING_SNAKE_CASE_ , default="data/dump" , help="The dump file prefix." )
UpperCamelCase_ = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCamelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCamelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCamelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCamelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCamelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCamelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCamelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCamelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCamelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCamelCase_ = fp.readlines()
logger.info("Start encoding" )
logger.info(f"{len(SCREAMING_SNAKE_CASE_ )} examples to process." )
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = 1_0_0_0_0
UpperCamelCase_ = time.time()
for text in data:
UpperCamelCase_ = f"{bos} {text.strip()} {sep}"
UpperCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
rslt.append(SCREAMING_SNAKE_CASE_ )
iter += 1
if iter % interval == 0:
UpperCamelCase_ = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCamelCase_ = time.time()
logger.info("Finished binarization" )
logger.info(f"{len(SCREAMING_SNAKE_CASE_ )} examples processed." )
UpperCamelCase_ = f"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCamelCase_ = tokenizer.vocab_size
if vocab_size < (1 << 1_6):
UpperCamelCase_ = [np.uintaa(SCREAMING_SNAKE_CASE_ ) for d in rslt]
else:
UpperCamelCase_ = [np.intaa(SCREAMING_SNAKE_CASE_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(SCREAMING_SNAKE_CASE_ , "wb" ) as handle:
pickle.dump(rslt_ , SCREAMING_SNAKE_CASE_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 628
|
"""simple docstring"""
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : Sequence[int] | None = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
A__ = nums[0]
for i in range(1 , len(UpperCamelCase ) ):
A__ = nums[i]
A__ = max(UpperCamelCase , ans + num , UpperCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase__ = int(input("Enter number of elements : ").strip())
lowerCamelCase__ = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 574
| 0
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowercase :
def __init__( self : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Optional[int]=1_0_0 , lowercase__ : Optional[Any]=1_3 , lowercase__ : Dict=3_0 , lowercase__ : Optional[Any]=2 , lowercase__ : List[str]=3 , lowercase__ : Union[str, Any]=True , lowercase__ : Dict=True , lowercase__ : Optional[int]=3_2 , lowercase__ : Union[str, Any]=4 , lowercase__ : List[Any]=4 , lowercase__ : Any=3_7 , lowercase__ : int="gelu" , lowercase__ : Tuple=0.1 , lowercase__ : Optional[int]=0.1 , lowercase__ : Dict=1_0 , lowercase__ : Dict=0.02 , lowercase__ : Dict=3 , lowercase__ : Union[str, Any]=None , lowercase__ : Union[str, Any]=[0, 1, 2, 3] , ):
a_ = parent
a_ = 1_0_0
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = is_training
a_ = use_labels
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = type_sequence_label_size
a_ = initializer_range
a_ = scope
a_ = out_indices
a_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a_ = (image_size // patch_size) ** 2
a_ = num_patches + 1
def __magic_name__ ( self : str ):
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : List[str] ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __magic_name__ ( self : Any , lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] ):
a_ = BeitModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Dict , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : int ):
a_ = BeitForMaskedImageModeling(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __magic_name__ ( self : Optional[int] , lowercase__ : str , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : Tuple ):
a_ = self.type_sequence_label_size
a_ = BeitForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a_ = 1
a_ = BeitForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : Dict , lowercase__ : Optional[Any] , lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : Any ):
a_ = self.num_labels
a_ = BeitForSemanticSegmentation(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
a_ = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __magic_name__ ( self : Tuple ):
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ = config_and_inputs
a_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( a__ , a__ , unittest.TestCase ):
_lowerCAmelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __magic_name__ ( self : Dict ):
a_ = BeitModelTester(self )
a_ = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7 )
def __magic_name__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __magic_name__ ( self : Dict ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __magic_name__ ( self : Optional[Any] ):
pass
def __magic_name__ ( self : Tuple ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def __magic_name__ ( self : List[Any] ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(lowercase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def __magic_name__ ( self : List[str] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __magic_name__ ( self : int ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def __magic_name__ ( self : Tuple ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
def __magic_name__ ( self : List[str] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__ )
def __magic_name__ ( self : List[str] ):
if not self.model_tester.is_training:
return
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowercase__ ), BeitForMaskedImageModeling]:
continue
a_ = model_class(lowercase__ )
model.to(lowercase__ )
model.train()
a_ = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
a_ = model(**lowercase__ ).loss
loss.backward()
def __magic_name__ ( self : Dict ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a_ = False
a_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowercase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
a_ = model_class(lowercase__ )
model.gradient_checkpointing_enable()
model.to(lowercase__ )
model.train()
a_ = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
a_ = model(**lowercase__ ).loss
loss.backward()
def __magic_name__ ( self : Union[str, Any] ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
a_ = model_class(config=lowercase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __magic_name__ ( self : Dict ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = BeitModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def UpperCAmelCase__ ( ):
"""simple docstring"""
a_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Union[str, Any] ):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : Optional[int] ):
a_ = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(lowercase__ )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).pixel_values.to(lowercase__ )
# prepare bool_masked_pos
a_ = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(pixel_values=lowercase__ , bool_masked_pos=lowercase__ )
a_ = outputs.logits
# verify the logits
a_ = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , lowercase__ )
a_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowercase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowercase__ , atol=1e-2 ) )
@slow
def __magic_name__ ( self : List[Any] ):
a_ = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(lowercase__ )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
a_ = outputs.logits
# verify the logits
a_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , lowercase__ )
a_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowercase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase__ , atol=1e-4 ) )
a_ = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , lowercase__ )
@slow
def __magic_name__ ( self : Any ):
a_ = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
lowercase__ )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
a_ = outputs.logits
# verify the logits
a_ = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , lowercase__ )
a_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowercase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase__ , atol=1e-4 ) )
a_ = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , lowercase__ )
@slow
def __magic_name__ ( self : Optional[int] ):
a_ = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
a_ = model.to(lowercase__ )
a_ = BeitImageProcessor(do_resize=lowercase__ , size=6_4_0 , do_center_crop=lowercase__ )
a_ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
a_ = Image.open(ds[0]['''file'''] )
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
a_ = outputs.logits
# verify the logits
a_ = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , lowercase__ )
a_ = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
a_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowercase__ , )
else:
a_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowercase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase__ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : Dict ):
a_ = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
a_ = model.to(lowercase__ )
a_ = BeitImageProcessor(do_resize=lowercase__ , size=6_4_0 , do_center_crop=lowercase__ )
a_ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
a_ = Image.open(ds[0]['''file'''] )
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
a_ = outputs.logits.detach().cpu()
a_ = image_processor.post_process_semantic_segmentation(outputs=lowercase__ , target_sizes=[(5_0_0, 3_0_0)] )
a_ = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , lowercase__ )
a_ = image_processor.post_process_semantic_segmentation(outputs=lowercase__ )
a_ = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , lowercase__ )
| 700
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class __lowercase ( a__ ):
def __init__( self : List[Any] , *lowercase__ : Tuple , **lowercase__ : List[Any] ):
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , lowercase__ , )
super().__init__(*lowercase__ , **lowercase__ )
| 143
| 0
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowerCAmelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
a_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
a_ : List[Any] = image_classifier(lowerCAmelCase_ , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowerCAmelCase_ ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
a_ : Optional[Any] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
] , )
@require_tf
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
a_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
a_ : Tuple = image_classifier(lowerCAmelCase_ , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
a_ : List[Any] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(lowerCAmelCase_ )},
],
] , )
@slow
@require_torch
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
a_ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
a_ : Dict = image_classifier(lowerCAmelCase_ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
a_ : int = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
a_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
a_ : List[Any] = image_classifier(lowerCAmelCase_ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
a_ : List[Any] = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
| 577
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a_ : Union[str, Any] = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
a_ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCAmelCase_ , os.listdir(lowerCAmelCase_ )[0] , """snapshots""" ) )]
a_ : Any = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCAmelCase_ )
a_ : Optional[int] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : List[str] = jax.random.PRNGKey(0 )
a_ : Optional[Any] = 4
a_ : Dict = jax.device_count()
a_ : Any = num_samples * [prompt]
a_ : Optional[Any] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : str = replicate(lowerCAmelCase_ )
a_ : List[Any] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Tuple = shard(lowerCAmelCase_ )
a_ : Any = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
a_ : List[str] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCAmelCase_ ) == num_samples
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=lowerCAmelCase_ )
a_ : int = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : int = jax.random.PRNGKey(0 )
a_ : Any = 50
a_ : List[str] = jax.device_count()
a_ : Any = num_samples * [prompt]
a_ : List[str] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : int = replicate(lowerCAmelCase_ )
a_ : Optional[int] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : int = shard(lowerCAmelCase_ )
a_ : Any = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ )
a_ : Optional[int] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : List[Any] = jax.random.PRNGKey(0 )
a_ : str = 50
a_ : List[Any] = jax.device_count()
a_ : Optional[int] = num_samples * [prompt]
a_ : List[str] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : Optional[int] = replicate(lowerCAmelCase_ )
a_ : Dict = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Union[str, Any] = shard(lowerCAmelCase_ )
a_ : List[Any] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
a_ : Optional[Any] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : Optional[Any] = jax.random.PRNGKey(0 )
a_ : str = 50
a_ : Optional[int] = jax.device_count()
a_ : List[Any] = num_samples * [prompt]
a_ : List[Any] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : str = replicate(lowerCAmelCase_ )
a_ : Dict = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Optional[Any] = shard(lowerCAmelCase_ )
a_ : Optional[Any] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
a_ , a_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
a_ : Optional[Any] = scheduler.create_state()
a_ : Dict = scheduler_state
a_ : int = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : Tuple = jax.random.PRNGKey(0 )
a_ : Tuple = 50
a_ : int = jax.device_count()
a_ : Any = num_samples * [prompt]
a_ : Optional[int] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : Dict = replicate(lowerCAmelCase_ )
a_ : List[Any] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : int = shard(lowerCAmelCase_ )
a_ : int = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase_ , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : Optional[Any] = jax.device_count()
a_ : Optional[int] = num_samples * [prompt]
a_ : Optional[int] = jax.random.split(jax.random.PRNGKey(0 ) , lowerCAmelCase_ )
a_ , a_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ , )
a_ : Dict = replicate(lowerCAmelCase_ )
a_ : Any = pipeline.prepare_inputs(lowerCAmelCase_ )
a_ : Optional[int] = shard(lowerCAmelCase_ )
a_ : str = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
a_ : Union[str, Any] = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
a_ , a_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ , use_memory_efficient_attention=lowerCAmelCase_ , )
a_ : Optional[Any] = replicate(lowerCAmelCase_ )
a_ : int = pipeline.prepare_inputs(lowerCAmelCase_ )
a_ : Optional[int] = shard(lowerCAmelCase_ )
a_ : Union[str, Any] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
a_ : Optional[Any] = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 577
| 1
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def A_ ( SCREAMING_SNAKE_CASE_ ) ->str:
lowercase_ = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
lowercase_ = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , SCREAMING_SNAKE_CASE_ )
if matches:
lowercase_ = float(matches[1] )
lowercase_ = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
lowercase_ = 10_01
lowercase_ = """imagenet-1k-id2label.json"""
lowercase_ = """huggingface/label-files"""
lowercase_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) )
lowercase_ = {int(SCREAMING_SNAKE_CASE_ ) + 1: v for k, v in idalabel.items()}
lowercase_ = """background"""
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
return config
def A_ ( ) ->Any:
lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) ->Union[str, Any]:
lowercase_ = get_mobilenet_va_config(SCREAMING_SNAKE_CASE_ )
# Load 🤗 model
lowercase_ = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
lowercase_ = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
lowercase_ = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
lowercase_ = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
lowercase_ = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
lowercase_ = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing to the hub...""" )
lowercase_ = """google/""" + model_name
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ )
model.push_to_hub(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 603
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__snake_case = logging.get_logger(__name__)
class _a ( __a ):
"""simple docstring"""
def __init__( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : float , **lowercase_ : Dict ):
'''simple docstring'''
lowercase_ = feature_size
lowercase_ = sampling_rate
lowercase_ = padding_value
lowercase_ = kwargs.pop("""padding_side""" , """right""" )
lowercase_ = kwargs.pop("""return_attention_mask""" , lowercase_ )
super().__init__(**lowercase_ )
def lowerCamelCase__ ( self : List[Any] , lowercase_ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , lowercase_ : Union[bool, str, PaddingStrategy] = True , lowercase_ : Optional[int] = None , lowercase_ : bool = False , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowercase_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
lowercase_ = processed_features[self.model_input_names[0]]
lowercase_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
lowercase_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowercase_ = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowercase_ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
lowercase_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
lowercase_ = """tf"""
elif is_torch_tensor(lowercase_ ):
lowercase_ = """pt"""
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
lowercase_ = """np"""
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowercase_ = to_numpy(lowercase_ )
else:
lowercase_ = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowercase_ = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
lowercase_ = processed_features[self.model_input_names[0]]
lowercase_ = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
lowercase_ = []
for i in range(lowercase_ ):
lowercase_ = {k: v[i] for k, v in processed_features.items()}
# truncation
lowercase_ = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowercase_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowercase_ = PaddingStrategy.MAX_LENGTH
lowercase_ = {}
for i in range(lowercase_ ):
# padding
lowercase_ = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
lowercase_ = []
if value.dtype is np.dtype(np.floataa ):
lowercase_ = value.astype(np.floataa )
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def lowerCamelCase__ ( self : Any , lowercase_ : Union[Dict[str, np.ndarray], BatchFeature] , lowercase_ : Optional[int] = None , lowercase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , ):
'''simple docstring'''
lowercase_ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowercase_ = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowercase_ = np.ones(len(lowercase_ ) , dtype=np.intaa )
if needs_to_be_padded:
lowercase_ = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
lowercase_ = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
lowercase_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowercase_ = np.pad(
lowercase_ , lowercase_ , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
lowercase_ = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
lowercase_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowercase_ = np.pad(
lowercase_ , lowercase_ , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def lowerCamelCase__ ( self : List[Any] , lowercase_ : Union[Dict[str, np.ndarray], BatchFeature] , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
lowercase_ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase_ = len(lowercase_ ) > max_length
if needs_to_be_truncated:
lowercase_ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowercase_ = processed_features["""attention_mask"""][:max_length]
return processed_features
def lowerCamelCase__ ( self : List[Any] , lowercase_ : Optional[int]=False , lowercase_ : List[str]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
lowercase_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
lowercase_ = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
lowercase_ = padding
else:
lowercase_ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 603
| 1
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__A =[
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__A =logging.getLogger()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("-f" )
lowerCamelCase_ = parser.parse_args()
return args.f
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__="eval" ):
lowerCamelCase_ = os.path.join(lowerCamelCase__ , F'{split}_results.json' )
if os.path.exists(lowerCamelCase__ ):
with open(lowerCamelCase__ , "r" ) as f:
return json.load(lowerCamelCase__ )
raise ValueError(F'can\'t find {path}' )
__A =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(lowercase , "argv" , lowercase ):
run_flax_glue.main()
lowerCamelCase_ = get_results(lowercase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(lowercase , "argv" , lowercase ):
run_clm_flax.main()
lowerCamelCase_ = get_results(lowercase )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(lowercase , "argv" , lowercase ):
run_summarization_flax.main()
lowerCamelCase_ = get_results(lowercase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(lowercase , "argv" , lowercase ):
run_mlm_flax.main()
lowerCamelCase_ = get_results(lowercase )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(lowercase , "argv" , lowercase ):
run_ta_mlm_flax.main()
lowerCamelCase_ = get_results(lowercase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.4_2 )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowerCamelCase_ = 7 if get_gpu_count() > 1 else 2
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(lowercase , "argv" , lowercase ):
run_flax_ner.main()
lowerCamelCase_ = get_results(lowercase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(lowercase , "argv" , lowercase ):
run_qa.main()
lowerCamelCase_ = get_results(lowercase )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 463
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A =logging.getLogger(__name__)
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase__ , default=1_0_0_0 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase__ , default=5_1_2 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
lowerCamelCase_ = parser.parse_args()
return args
def lowerCamelCase_ ( lowerCamelCase__ ):
def fn(lowerCamelCase__ ):
return tokenizer(examples["text"] )
return fn
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = []
for i in range(len(tokenized_data["input_ids"] ) ):
lowerCamelCase_ = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
lowerCamelCase_ = tf.train.Features(feature=lowerCamelCase__ )
lowerCamelCase_ = tf.train.Example(features=lowerCamelCase__ )
lowerCamelCase_ = example.SerializeToString()
records.append(lowerCamelCase__ )
return records
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , args.limit )
lowerCamelCase_ = dataset.select(range(lowerCamelCase__ ) )
print(F'Limiting the dataset to {args.limit} entries.' )
lowerCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCamelCase_ = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
lowerCamelCase_ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCamelCase_ = tokenize_function(lowerCamelCase__ )
lowerCamelCase_ = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowerCamelCase__ ):
# Concatenate all texts.
lowerCamelCase_ = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCamelCase_ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCamelCase_ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCamelCase_ = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowerCamelCase_ = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_0_0_0 , num_proc=4 )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ):
lowerCamelCase_ = grouped_dataset[shard : shard + args.shard_size]
lowerCamelCase_ = len(dataset_snapshot["input_ids"] )
lowerCamelCase_ = os.path.join(lowerCamelCase__ , F'dataset-{shard_count}-{records_containing}.tfrecord' )
lowerCamelCase_ = get_serialized_examples(lowerCamelCase__ )
with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file:
for i in range(len(lowerCamelCase__ ) ):
lowerCamelCase_ = serialized_examples[i]
out_file.write(lowerCamelCase__ )
print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
print(F'Total {args.split} records: {total_records}' , file=lowerCamelCase__ )
if __name__ == "__main__":
__A =parse_args()
main(args)
| 463
| 1
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = DDIMPipeline
A_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
A_ = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
A_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
A_ = False
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
_lowerCamelCase = DDIMScheduler()
_lowerCamelCase = {'''unet''': unet, '''scheduler''': scheduler}
return components
def UpperCamelCase_ ( self , A_ , A_=0 ) -> int:
"""simple docstring"""
if str(A_ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(A_ )
else:
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
_lowerCamelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = '''cpu'''
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = self.pipeline_class(**A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = self.get_dummy_inputs(A_ )
_lowerCamelCase = pipe(**A_ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_lowerCamelCase = np.array(
[1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4] )
_lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A_ , 1E-3 )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = '''google/ddpm-cifar10-32'''
_lowerCamelCase = UNetaDModel.from_pretrained(A_ )
_lowerCamelCase = DDIMScheduler()
_lowerCamelCase = DDIMPipeline(unet=A_ , scheduler=A_ )
ddim.to(A_ )
ddim.set_progress_bar_config(disable=A_ )
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = ddim(generator=A_ , eta=0.0 , output_type='''numpy''' ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCamelCase = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = '''google/ddpm-ema-bedroom-256'''
_lowerCamelCase = UNetaDModel.from_pretrained(A_ )
_lowerCamelCase = DDIMScheduler.from_pretrained(A_ )
_lowerCamelCase = DDIMPipeline(unet=A_ , scheduler=A_ )
ddpm.to(A_ )
ddpm.set_progress_bar_config(disable=A_ )
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = ddpm(generator=A_ , output_type='''numpy''' ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_lowerCamelCase = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 717
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
| 638
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__( self , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , **lowercase__ , ) -> List[str]:
SCREAMING_SNAKE_CASE : Dict = path_or_paths
SCREAMING_SNAKE_CASE : Any = split if split or isinstance(lowercase__ , lowercase__ ) else 'train'
SCREAMING_SNAKE_CASE : List[str] = features
SCREAMING_SNAKE_CASE : Dict = cache_dir
SCREAMING_SNAKE_CASE : Any = keep_in_memory
SCREAMING_SNAKE_CASE : List[Any] = streaming
SCREAMING_SNAKE_CASE : Any = num_proc
SCREAMING_SNAKE_CASE : Optional[int] = kwargs
@abstractmethod
def _UpperCamelCase ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__( self , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , **lowercase__ , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : List[str] = features
SCREAMING_SNAKE_CASE : List[str] = cache_dir
SCREAMING_SNAKE_CASE : Any = keep_in_memory
SCREAMING_SNAKE_CASE : int = streaming
SCREAMING_SNAKE_CASE : int = num_proc
SCREAMING_SNAKE_CASE : Tuple = kwargs
@abstractmethod
def _UpperCamelCase ( self ) -> Union[Dataset, IterableDataset]:
pass
| 251
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = StableDiffusionDiffEditPipeline
_lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
_lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
_lowercase : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase : List[str] = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(a)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a)).to(a)
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
if not hasattr(self.pipeline_class , '_optional_components'):
return
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a , a , a)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe(**a)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a)
SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a)
pipe_loaded.to(a)
pipe_loaded.set_progress_bar_config(disable=a)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a , a) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0]
SCREAMING_SNAKE_CASE = np.abs(output - output_loaded).max()
self.assertLess(a , 1E-4)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a)
SCREAMING_SNAKE_CASE = pipe.generate_mask(**a)
SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
SCREAMING_SNAKE_CASE = np.array([0] * 9)
SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a)
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> List[Any]:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768))
SCREAMING_SNAKE_CASE = raw_image
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
| 73
| 0
|
'''simple docstring'''
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and number_of_steps > 0
), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
_UpperCamelCase , _UpperCamelCase =1, 1
for _ in range(number_of_steps - 1 ):
_UpperCamelCase , _UpperCamelCase =current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271
|
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__lowerCamelCase : List[Any] = 'bert-base-cased'
__lowerCamelCase : str = 'fp16'
__lowerCamelCase : Optional[int] = 'bf16'
__lowerCamelCase : List[Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
def UpperCamelCase__ ( self : Tuple ) -> Union[str, Any]:
super().setUp()
_UpperCamelCase =dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def UpperCamelCase__ ( self : int ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(UpperCamelCase__ ):
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase =F'''{i + 1}'''
_UpperCamelCase =strategy
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def UpperCamelCase__ ( self : Union[str, Any] ) -> Any:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(UpperCamelCase__ ):
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase =prefetch_policy
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def UpperCamelCase__ ( self : Optional[Any] ) -> Tuple:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(UpperCamelCase__ ):
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase =state_dict_type
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def UpperCamelCase__ ( self : Any ) -> List[Any]:
_UpperCamelCase =AutoModel.from_pretrained(UpperCamelCase__ )
for policy in FSDP_AUTO_WRAP_POLICY:
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase =policy
if policy == "TRANSFORMER_BASED_WRAP":
_UpperCamelCase ='''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
_UpperCamelCase ='''2000'''
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase ='''TRANSFORMER_BASED_WRAP'''
_UpperCamelCase ='''T5Layer'''
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =FullyShardedDataParallelPlugin()
with self.assertRaises(UpperCamelCase__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase ='''SIZE_BASED_WRAP'''
_UpperCamelCase ='''0'''
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def UpperCamelCase__ ( self : Any ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase =mp_dtype
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =Accelerator()
if mp_dtype == "fp16":
_UpperCamelCase =torch.floataa
elif mp_dtype == "bf16":
_UpperCamelCase =torch.bfloataa
_UpperCamelCase =MixedPrecision(param_dtype=UpperCamelCase__ , reduce_dtype=UpperCamelCase__ , buffer_dtype=UpperCamelCase__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , UpperCamelCase__ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , UpperCamelCase__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(UpperCamelCase__ )
def UpperCamelCase__ ( self : Dict ) -> Tuple:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase =str(UpperCamelCase__ ).lower()
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=UpperCamelCase__ ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ) -> List[str]:
super().setUp()
_UpperCamelCase =0.82
_UpperCamelCase =[
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
_UpperCamelCase ={
'''multi_gpu_fp16''': 3200,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2000,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_UpperCamelCase =160
_UpperCamelCase =160
_UpperCamelCase =inspect.getfile(accelerate.test_utils )
_UpperCamelCase =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def UpperCamelCase__ ( self : int ) -> str:
_UpperCamelCase =os.path.join(self.test_scripts_folder , '''test_performance.py''' )
_UpperCamelCase =['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
_UpperCamelCase =cmd.copy()
for i, strategy in enumerate(UpperCamelCase__ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
def UpperCamelCase__ ( self : Optional[Any] ) -> Any:
_UpperCamelCase =os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
_UpperCamelCase =[
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(UpperCamelCase__ ):
_UpperCamelCase =cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
_UpperCamelCase =len(UpperCamelCase__ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_UpperCamelCase =cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
_UpperCamelCase =cmd_config[:-1]
_UpperCamelCase =os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
def UpperCamelCase__ ( self : Dict ) -> int:
_UpperCamelCase =os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
_UpperCamelCase =[
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_UpperCamelCase =cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(UpperCamelCase__ ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 271
| 1
|
'''simple docstring'''
def UpperCamelCase_ ( snake_case_ : int=2_81_23 ) -> Tuple:
'''simple docstring'''
__lowerCAmelCase = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
__lowerCAmelCase = set()
__lowerCAmelCase = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(snake_case_ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 427
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_ ( snake_case_ : list[int | float] , snake_case_ : int , snake_case_ : int ) -> int | float:
'''simple docstring'''
if len(snake_case_ ) == 0:
raise ValueError("""find_max() arg is an empty sequence""" )
if (
left >= len(snake_case_ )
or left < -len(snake_case_ )
or right >= len(snake_case_ )
or right < -len(snake_case_ )
):
raise IndexError("""list index out of range""" )
if left == right:
return nums[left]
__lowerCAmelCase = (left + right) >> 1 # the middle
__lowerCAmelCase = find_max(snake_case_ , snake_case_ , snake_case_ ) # find max in range[left, mid]
__lowerCAmelCase = find_max(snake_case_ , mid + 1 , snake_case_ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 427
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['ChineseCLIPFeatureExtractor']
__UpperCAmelCase = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __a ( self ,__SCREAMING_SNAKE_CASE ):
return 0.0
def SCREAMING_SNAKE_CASE_ ( snake_case_ : np.ndarray , snake_case_ : int ) -> tuple[int | float, int | float]:
SCREAMING_SNAKE_CASE : Optional[int] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
SCREAMING_SNAKE_CASE : int = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE_ ( snake_case_ : FilterType , snake_case_ : int ) -> None:
SCREAMING_SNAKE_CASE : List[str] = 512
SCREAMING_SNAKE_CASE : Tuple = [1] + [0] * (size - 1)
SCREAMING_SNAKE_CASE : str = [filter_type.process(snake_case_ ) for item in inputs]
SCREAMING_SNAKE_CASE : Dict = [0] * (samplerate - size) # zero-padding
outputs += filler
SCREAMING_SNAKE_CASE : int = np.abs(np.fft.fft(snake_case_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = 20 * np.logaa(snake_case_ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
SCREAMING_SNAKE_CASE : Any = get_bounds(snake_case_ , snake_case_ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(snake_case_ )
plt.show()
def SCREAMING_SNAKE_CASE_ ( snake_case_ : FilterType , snake_case_ : int ) -> None:
SCREAMING_SNAKE_CASE : Union[str, Any] = 512
SCREAMING_SNAKE_CASE : str = [1] + [0] * (size - 1)
SCREAMING_SNAKE_CASE : List[Any] = [filter_type.process(snake_case_ ) for item in inputs]
SCREAMING_SNAKE_CASE : Optional[Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
SCREAMING_SNAKE_CASE : str = np.angle(np.fft.fft(snake_case_ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(snake_case_ , -2 * pi ) )
plt.show()
| 220
| 0
|
from collections import defaultdict
from math import ceil, sqrt
def A ( lowercase__ : int = 100_0000 , lowercase__ : int = 10 ) -> int:
UpperCamelCase__ :defaultdict = defaultdict(lowercase__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ :Tuple = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ :Union[str, Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
snake_case__ = logging.get_logger(__name__)
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
# Get the sagemaker specific mp parameters from smp_options variable.
a__ :str = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
a__ :str = json.loads(a )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
a__ :Dict = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
a__ :str = json.loads(a )
if not mpi_options.get("sagemaker_mpi_enabled" , a ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( _a):
lowerCamelCase_ = field(
default='' ,metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} ,)
def _snake_case ( self : List[str] ) ->int:
"""simple docstring"""
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , __A , )
@cached_property
def _snake_case ( self : List[Any] ) ->"torch.device":
"""simple docstring"""
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
a__ :str = torch.device("cpu" )
a__ :Optional[Any] = 0
elif is_sagemaker_model_parallel_available():
a__ :Union[str, Any] = smp.local_rank()
a__ :Tuple = torch.device("cuda" , __A )
a__ :Any = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
a__ :Optional[Any] = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
a__ :Any = torch.device("cuda" , self.local_rank )
a__ :List[Any] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
a__ :Optional[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
a__ :Any = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
a__ :List[Any] = torch.device("cuda" , self.local_rank )
a__ :Union[str, Any] = 1
if device.type == "cuda":
torch.cuda.set_device(__A )
return device
@property
def _snake_case ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def _snake_case ( self : int ) ->Dict:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def _snake_case ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
return False
| 395
| 0
|
from typing import Any
import numpy as np
def A_ ( snake_case : np.ndarray ) -> bool:
'''simple docstring'''
return np.array_equal(snake_case , matrix.conjugate().T )
def A_ ( snake_case : np.ndarray , snake_case : np.ndarray ) -> Any:
'''simple docstring'''
__UpperCamelCase = v.conjugate().T
__UpperCamelCase = v_star.dot(snake_case )
assert isinstance(snake_case , np.ndarray )
return (v_star_dot.dot(snake_case )) / (v_star.dot(snake_case ))
def A_ ( ) -> None:
'''simple docstring'''
__UpperCamelCase = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__UpperCamelCase = np.array([[1], [2], [3]] )
assert is_hermitian(snake_case ), f"{a} is not hermitian."
print(rayleigh_quotient(snake_case , snake_case ) )
__UpperCamelCase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(snake_case ), f"{a} is not hermitian."
assert rayleigh_quotient(snake_case , snake_case ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 451
|
def A_ ( snake_case : float ) -> float:
'''simple docstring'''
if edge <= 0 or not isinstance(snake_case , snake_case ):
raise ValueError('''Length must be a positive.''' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def A_ ( snake_case : float ) -> float:
'''simple docstring'''
if edge <= 0 or not isinstance(snake_case , snake_case ):
raise ValueError('''Length must be a positive.''' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 451
| 1
|
from timeit import timeit
lowerCamelCase__ = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def lowerCAmelCase__ ( a__ ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase = 0
_UpperCamelCase = len(__a ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def lowerCAmelCase__ ( a__ ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = len(__a ) // 2
_UpperCamelCase = len(__a )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(__a ) )
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
if len(__a ) <= 2:
return True
if s[0] == s[len(__a ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def lowerCAmelCase__ ( a__ ) ->Optional[Any]:
'''simple docstring'''
return s == s[::-1]
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = f'all({name}(key) is value for key, value in test_data.items())'
_UpperCamelCase = f'from __main__ import test_data, {name}'
_UpperCamelCase = 500_000
_UpperCamelCase = timeit(stmt=__a , setup=__a , number=__a )
print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 547
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase_ : Dict = None
UpperCamelCase_ : int = logging.get_logger(__name__)
UpperCamelCase_ : str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase_ : List[Any] = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ : Optional[Any] = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
UpperCamelCase_ : str = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = ["input_ids", "attention_mask"]
snake_case = MBartTokenizer
snake_case = []
snake_case = []
def __init__( self : List[str] , _snake_case : Tuple=None , _snake_case : int=None , _snake_case : List[Any]="<s>" , _snake_case : Tuple="</s>" , _snake_case : str="</s>" , _snake_case : List[Any]="<s>" , _snake_case : Dict="<unk>" , _snake_case : str="<pad>" , _snake_case : Any="<mask>" , _snake_case : int=None , _snake_case : Optional[int]=None , _snake_case : Any=None , **_snake_case : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
vocab_file=_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , **_snake_case , )
A_ = vocab_file
A_ = False if not self.vocab_file else True
A_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
A_ = {
lang_code: self.convert_tokens_to_ids(_snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ = src_lang if src_lang is not None else "en_XX"
A_ = self.convert_tokens_to_ids(self._src_lang )
A_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase__ ( self : Dict ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self : Tuple , _snake_case : str ) -> None:
"""simple docstring"""
A_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ ( self : Optional[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : List[Any] , _snake_case : str , _snake_case : str , _snake_case : Optional[str] , _snake_case : Optional[str] , **_snake_case : Optional[int] ) -> str:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A_ = src_lang
A_ = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case )
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = tgt_lang_id
return inputs
def lowerCamelCase__ ( self : Dict , _snake_case : List[str] , _snake_case : str = "en_XX" , _snake_case : Optional[List[str]] = None , _snake_case : str = "ro_RO" , **_snake_case : str , ) -> BatchEncoding:
"""simple docstring"""
A_ = src_lang
A_ = tgt_lang
return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Tuple , _snake_case : List[str] ) -> None:
"""simple docstring"""
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase__ ( self : List[str] , _snake_case : str ) -> None:
"""simple docstring"""
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : str , _snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
A_ = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
| 115
| 0
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int=1 ) -> Tuple:
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any]=0 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = []
for old_item in old_list:
UpperCAmelCase_ : Tuple = old_item.replace("in_layers.0" , "norm1" )
UpperCAmelCase_ : Any = new_item.replace("in_layers.2" , "conv1" )
UpperCAmelCase_ : Any = new_item.replace("out_layers.0" , "norm2" )
UpperCAmelCase_ : Any = new_item.replace("out_layers.3" , "conv2" )
UpperCAmelCase_ : str = new_item.replace("emb_layers.1" , "time_emb_proj" )
UpperCAmelCase_ : List[str] = new_item.replace("skip_connection" , "conv_shortcut" )
UpperCAmelCase_ : int = shave_segments(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=_SCREAMING_SNAKE_CASE )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str]=0 ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = []
for old_item in old_list:
UpperCAmelCase_ : Tuple = old_item
UpperCAmelCase_ : Dict = new_item.replace("norm.weight" , "group_norm.weight" )
UpperCAmelCase_ : Union[str, Any] = new_item.replace("norm.bias" , "group_norm.bias" )
UpperCAmelCase_ : Dict = new_item.replace("proj_out.weight" , "proj_attn.weight" )
UpperCAmelCase_ : Tuple = new_item.replace("proj_out.bias" , "proj_attn.bias" )
UpperCAmelCase_ : Union[str, Any] = shave_segments(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=_SCREAMING_SNAKE_CASE )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : List[Any]=None ) -> int:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase_ : Any = old_checkpoint[path]
UpperCAmelCase_ : Dict = old_tensor.shape[0] // 3
UpperCAmelCase_ : List[str] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase_ : Optional[int] = old_tensor.shape[0] // config["num_head_channels"] // 3
UpperCAmelCase_ : int = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase_ : Any = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase_ : Dict = query.reshape(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = key.reshape(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = value.reshape(_SCREAMING_SNAKE_CASE )
for path in paths:
UpperCAmelCase_ : int = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase_ : List[str] = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
UpperCAmelCase_ : Dict = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
UpperCAmelCase_ : List[Any] = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase_ : Any = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase_ : List[Any] = old_checkpoint[path["old"]][:, :, 0]
else:
UpperCAmelCase_ : int = old_checkpoint[path["old"]]
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : Optional[Any] = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ : int = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ : Any = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ : Any = checkpoint["time_embed.2.bias"]
UpperCAmelCase_ : int = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ : Dict = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ : str = checkpoint["out.0.weight"]
UpperCAmelCase_ : str = checkpoint["out.0.bias"]
UpperCAmelCase_ : str = checkpoint["out.2.weight"]
UpperCAmelCase_ : str = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
UpperCAmelCase_ : str = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
UpperCAmelCase_ : int = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase_ : Any = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
UpperCAmelCase_ : List[Any] = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase_ : List[str] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
UpperCAmelCase_ : Union[str, Any] = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
for i in range(1 , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = (i - 1) // (config["num_res_blocks"] + 1)
UpperCAmelCase_ : Optional[Any] = (i - 1) % (config["num_res_blocks"] + 1)
UpperCAmelCase_ : Tuple = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
UpperCAmelCase_ : Tuple = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
UpperCAmelCase_ : str = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
UpperCAmelCase_ : List[str] = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
UpperCAmelCase_ : Union[str, Any] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = {"old": F'''input_blocks.{i}.0''', "new": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
UpperCAmelCase_ : List[Any] = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path, resnet_op] , config=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = renew_attention_paths(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = {
"old": F'''input_blocks.{i}.1''',
"new": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
UpperCAmelCase_ : Optional[Any] = {
F'''input_blocks.{i}.1.qkv.bias''': {
"key": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"query": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"value": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"key": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"query": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"value": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : Any = middle_blocks[0]
UpperCAmelCase_ : List[str] = middle_blocks[1]
UpperCAmelCase_ : Tuple = middle_blocks[2]
UpperCAmelCase_ : Union[str, Any] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = renew_attention_paths(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , attention_paths_to_split=_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = i // (config["num_res_blocks"] + 1)
UpperCAmelCase_ : int = i % (config["num_res_blocks"] + 1)
UpperCAmelCase_ : List[Any] = [shave_segments(_SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]]
UpperCAmelCase_ : Union[str, Any] = {}
for layer in output_block_layers:
UpperCAmelCase_ : str = layer.split("." )[0], shave_segments(_SCREAMING_SNAKE_CASE , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Optional[int] = [layer_name]
if len(_SCREAMING_SNAKE_CASE ) > 1:
UpperCAmelCase_ : str = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
UpperCAmelCase_ : Any = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
UpperCAmelCase_ : Union[str, Any] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = {"old": F'''output_blocks.{i}.0''', "new": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase_ : Any = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
UpperCAmelCase_ : str = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
UpperCAmelCase_ : Any = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(_SCREAMING_SNAKE_CASE ) == 2:
UpperCAmelCase_ : Dict = []
if len(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = renew_attention_paths(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = {
"old": F'''output_blocks.{i}.1''',
"new": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
UpperCAmelCase_ : List[str] = {
F'''output_blocks.{i}.1.qkv.bias''': {
"key": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"query": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"value": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"key": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"query": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"value": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=_SCREAMING_SNAKE_CASE , )
else:
UpperCAmelCase_ : List[str] = renew_resnet_paths(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase_ : List[Any] = ".".join(["output_blocks", str(_SCREAMING_SNAKE_CASE ), path["old"]] )
UpperCAmelCase_ : Dict = ".".join(["up_blocks", str(_SCREAMING_SNAKE_CASE ), "resnets", str(_SCREAMING_SNAKE_CASE ), path["new"]] )
UpperCAmelCase_ : List[str] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowerCamelCase = json.loads(f.read())
_lowerCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowerCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowerCamelCase = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowerCamelCase = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowerCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 710
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def a__ ( _SCREAMING_SNAKE_CASE : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
UpperCAmelCase_ : Tuple = {"+", "-", "*", "/"}
UpperCAmelCase_ : list[Any] = []
for token in postfix_notation:
if token in operations:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_SCREAMING_SNAKE_CASE ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.