code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 1000 ):
lowerCAmelCase = -1
lowerCAmelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowerCAmelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowerCAmelCase = n - a - b
if c * c == (a * a + b * b):
lowerCAmelCase = a * b * c
if candidate >= product:
lowerCAmelCase = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 4
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class a_ ( SCREAMING_SNAKE_CASE__ ):
A = '''mobilenet_v2'''
def __init__( self , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=224 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="relu6" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.8 , SCREAMING_SNAKE_CASE=0.0_2 , SCREAMING_SNAKE_CASE=0.0_0_1 , SCREAMING_SNAKE_CASE=255 , **SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = depth_multiplier
SCREAMING_SNAKE_CASE_ = depth_divisible_by
SCREAMING_SNAKE_CASE_ = min_depth
SCREAMING_SNAKE_CASE_ = expand_ratio
SCREAMING_SNAKE_CASE_ = output_stride
SCREAMING_SNAKE_CASE_ = first_layer_is_expansion
SCREAMING_SNAKE_CASE_ = finegrained_output
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = tf_padding
SCREAMING_SNAKE_CASE_ = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = semantic_loss_ignore_index
class a_ ( SCREAMING_SNAKE_CASE__ ):
A = version.parse('''1.11''' )
@property
def A_( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def A_( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def A_( self ) -> float:
"""simple docstring"""
return 1e-4
| 205
| 0
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Optional[int]:
"""simple docstring"""
snake_case_ = [False] * len(SCREAMING_SNAKE_CASE )
snake_case_ = [-1] * len(SCREAMING_SNAKE_CASE )
def dfs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case_ = True
snake_case_ = c
for u in graph[v]:
if not visited[u]:
dfs(SCREAMING_SNAKE_CASE , 1 - c )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if not visited[i]:
dfs(SCREAMING_SNAKE_CASE , 0 )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCAmelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 531
|
from collections import namedtuple
UpperCAmelCase = namedtuple("""from_to""", """from_ to""")
UpperCAmelCase = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00_454, 264.172),
"""cubicyard""": from_to(0.76_455, 1.30_795),
"""cubicfoot""": from_to(0.028, 35.3_147),
"""cup""": from_to(0.000_236_588, 4_226.75),
}
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> float:
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ''', '''.join(SCREAMING_SNAKE_CASE ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ''', '''.join(SCREAMING_SNAKE_CASE ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 531
| 1
|
import sys
import turtle
def _snake_case (__lowercase , __lowercase):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
if depth == 0:
return
triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1)
triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1)
triangle(__lowercase , get_mid(__lowercase , __lowercase) , get_mid(__lowercase , __lowercase) , depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
snake_case__ : Tuple = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
snake_case__ : Optional[Any] = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 23
|
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class a__:
def __init__( self : List[Any] , __snake_case : str ):
if isinstance(__snake_case , __snake_case ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Union[str, Any] = deepcopy(__snake_case )
elif os.path.exists(__snake_case ):
with io.open(__snake_case , 'r' , encoding='utf-8' ) as f:
a : List[str] = json.load(__snake_case )
else:
try:
a : str = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' )
a : Union[str, Any] = json.loads(__snake_case )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
a : Optional[int] = config
self.set_stage_and_offload()
def lowercase_ ( self : Any ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
a : List[str] = self.get_value('zero_optimization.stage' , -1 )
# offload
a : Union[str, Any] = False
if self.is_zeroa() or self.is_zeroa():
a : Any = set(['cpu', 'nvme'] )
a : Tuple = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
a : Optional[int] = True
def lowercase_ ( self : str , __snake_case : Optional[Any] ):
a : Any = self.config
# find the config node of interest if it exists
a : Dict = ds_key_long.split('.' )
a : str = nodes.pop()
for node in nodes:
a : Tuple = config.get(__snake_case )
if config is None:
return None, ds_key
return config, ds_key
def lowercase_ ( self : int , __snake_case : Optional[int] , __snake_case : List[str]=None ):
a , a : int = self.find_config_node(__snake_case )
if config is None:
return default
return config.get(__snake_case , __snake_case )
def lowercase_ ( self : Dict , __snake_case : List[str] , __snake_case : List[Any]=False ):
a : Union[str, Any] = self.config
# find the config node of interest if it exists
a : Tuple = ds_key_long.split('.' )
for node in nodes:
a : Optional[int] = config
a : List[str] = config.get(__snake_case )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Dict ):
a : Optional[Any] = self.get_value(__snake_case )
return False if value is None else bool(__snake_case )
def lowercase_ ( self : str , __snake_case : Optional[Any] ):
a : Optional[Any] = self.get_value(__snake_case )
return False if value is None else not bool(__snake_case )
def lowercase_ ( self : Tuple ):
return self._stage == 2
def lowercase_ ( self : str ):
return self._stage == 3
def lowercase_ ( self : int ):
return self._offload
class a__:
def __init__( self : str , __snake_case : Any ):
a : Any = engine
def lowercase_ ( self : Optional[Any] , __snake_case : int , **__snake_case : List[Any] ):
# runs backpropagation and handles mixed precision
self.engine.backward(__snake_case , **__snake_case )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class a__( lowerCamelCase__ ):
def __init__( self : Dict , __snake_case : Any ):
super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case )
a : List[Any] = hasattr(self.optimizer , 'overflow' )
def lowercase_ ( self : Optional[Any] , __snake_case : str=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowercase_ ( self : int ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowercase_ ( self : Any ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class a__( lowerCamelCase__ ):
def __init__( self : str , __snake_case : List[Any] , __snake_case : str ):
super().__init__(__snake_case , __snake_case )
def lowercase_ ( self : Union[str, Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class a__:
def __init__( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : int=0.001 , __snake_case : Any=0 , **__snake_case : List[Any] ):
a : Optional[Any] = params
a : Union[str, Any] = lr
a : Any = weight_decay
a : Any = kwargs
class a__:
def __init__( self : Any , __snake_case : int , __snake_case : int=None , __snake_case : Dict=0 , **__snake_case : Optional[int] ):
a : Optional[int] = optimizer
a : List[str] = total_num_steps
a : Tuple = warmup_num_steps
a : Optional[Any] = kwargs
| 526
| 0
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowerCAmelCase_ = pytest.mark.integration
lowerCAmelCase_ = {'comet'}
lowerCAmelCase_ = importlib.util.find_spec('fairseq') is not None
lowerCAmelCase_ = {'code_eval'}
lowerCAmelCase_ = os.name == 'nt'
lowerCAmelCase_ = {'bertscore', 'frugalscore', 'perplexity'}
lowerCAmelCase_ = importlib.util.find_spec('transformers') is not None
def snake_case ( UpperCAmelCase : int ):
@wraps(UpperCAmelCase )
def wrapper(self : Tuple, UpperCAmelCase : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"' )
else:
test_case(self, UpperCAmelCase )
return wrapper
def snake_case ( UpperCAmelCase : Tuple ):
@wraps(UpperCAmelCase )
def wrapper(self : int, UpperCAmelCase : str ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"' )
else:
test_case(self, UpperCAmelCase )
return wrapper
def snake_case ( UpperCAmelCase : Union[str, Any] ):
@wraps(UpperCAmelCase )
def wrapper(self : str, UpperCAmelCase : Dict ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"' )
else:
test_case(self, UpperCAmelCase )
return wrapper
def snake_case ( ):
A = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
snake_case__ , snake_case__ , snake_case__ )
@local
class UpperCamelCase ( parameterized.TestCase ):
"""simple docstring"""
snake_case = {}
snake_case = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
def A( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
'''simple docstring'''
A = '[...]'
A = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' ,_SCREAMING_SNAKE_CASE ) ).module_path )
A = datasets.load.import_main_class(metric_module.__name__ ,dataset=_SCREAMING_SNAKE_CASE )
# check parameters
A = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(_SCREAMING_SNAKE_CASE ,metric_module.__name__ ):
with self.use_local_metrics():
try:
A = doctest.testmod(_SCREAMING_SNAKE_CASE ,verbose=_SCREAMING_SNAKE_CASE ,raise_on_error=_SCREAMING_SNAKE_CASE )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed ,0 )
self.assertGreater(results.attempted ,1 )
@slow
def A( self : int ,_SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
'''simple docstring'''
A = '[...]'
A = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' ,_SCREAMING_SNAKE_CASE ) ).module_path )
# run doctest
with self.use_local_metrics():
A = doctest.testmod(_SCREAMING_SNAKE_CASE ,verbose=_SCREAMING_SNAKE_CASE ,raise_on_error=_SCREAMING_SNAKE_CASE )
self.assertEqual(results.failed ,0 )
self.assertGreater(results.attempted ,1 )
@contextmanager
def A( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
'''simple docstring'''
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](_SCREAMING_SNAKE_CASE ):
yield
else:
yield
@contextmanager
def A( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
def load_local_metric(_SCREAMING_SNAKE_CASE : List[str] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Dict ):
return load_metric(os.path.join('metrics' ,_SCREAMING_SNAKE_CASE ) ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
with patch('datasets.load_metric' ) as mock_load_metric:
A = load_local_metric
yield
@classmethod
def A( cls : List[str] ,_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
'''simple docstring'''
def wrapper(_SCREAMING_SNAKE_CASE : str ):
A = contextmanager(_SCREAMING_SNAKE_CASE )
A = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def snake_case ( UpperCAmelCase : Any ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv', '', '' ) # handle pytest cli flags
class UpperCamelCase ( snake_case__ ):
"""simple docstring"""
def A( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
'''simple docstring'''
assert len(input_dict['input_ids'] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
A = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def snake_case ( UpperCAmelCase : str ):
import torch
def bert_cos_score_idf(UpperCAmelCase : Union[str, Any], UpperCAmelCase : Optional[int], *UpperCAmelCase : List[str], **UpperCAmelCase : List[Any] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCAmelCase ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('bert_score.scorer.get_model' ), patch(
'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
A = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def snake_case ( UpperCAmelCase : str ):
def load_from_checkpoint(UpperCAmelCase : List[str] ):
class UpperCamelCase :
"""simple docstring"""
def A( self : Dict ,_SCREAMING_SNAKE_CASE : List[str] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
'''simple docstring'''
assert len(_SCREAMING_SNAKE_CASE ) == 2
A = [0.19, 0.92]
return scores, sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch('comet.download_model' ) as mock_download_model:
A = None
with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
A = load_from_checkpoint
yield
def snake_case ( ):
A = load_metric(os.path.join('metrics', 'seqeval' ) )
A = 'ERROR'
A = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
with pytest.raises(UpperCAmelCase, match=re.escape(UpperCAmelCase ) ):
metric.compute(predictions=[], references=[], scheme=UpperCAmelCase )
| 110
|
from manim import *
class UpperCamelCase ( snake_case__ ):
"""simple docstring"""
def A( self : Dict ) -> Tuple:
'''simple docstring'''
A = Rectangle(height=0.5 ,width=0.5 )
A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
A = [mem.copy() for i in range(6 )]
A = [mem.copy() for i in range(6 )]
A = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = VGroup(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = Text('CPU' ,font_size=2_4 )
A = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
A = [mem.copy() for i in range(1 )]
A = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = Text('GPU' ,font_size=2_4 )
A = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.align_to(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(_SCREAMING_SNAKE_CASE )
A = [mem.copy() for i in range(6 )]
A = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
A = Text('Model' ,font_size=2_4 )
A = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) ,Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) ,Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) ,)
A = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' ,font_size=2_4 ,)
A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=1_8 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE ,run_time=2.5 ) ,Write(_SCREAMING_SNAKE_CASE ) ,Write(_SCREAMING_SNAKE_CASE ) )
self.add(_SCREAMING_SNAKE_CASE )
A = []
A = []
A = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 )
cpu_target.move_to(_SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
A = 0.46 / 4
A = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE ,run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait()
| 110
| 1
|
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase__ ) -> bool:
'''simple docstring'''
if num < 0:
return False
a__ = num
a__ = 0
while num > 0:
a__ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 232
|
"""simple docstring"""
__magic_name__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _lowerCamelCase ( ) -> None:
'''simple docstring'''
a__ = input('Enter message: ' )
a__ = input('Enter key [alphanumeric]: ' )
a__ = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
a__ = 'encrypt'
a__ = encrypt_message(UpperCAmelCase__,UpperCAmelCase__ )
elif mode.lower().startswith('d' ):
a__ = 'decrypt'
a__ = decrypt_message(UpperCAmelCase__,UpperCAmelCase__ )
print(f'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> str:
'''simple docstring'''
return translate_message(UpperCAmelCase__,UpperCAmelCase__,'encrypt' )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> str:
'''simple docstring'''
return translate_message(UpperCAmelCase__,UpperCAmelCase__,'decrypt' )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> str:
'''simple docstring'''
a__ = []
a__ = 0
a__ = key.upper()
for symbol in message:
a__ = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
a__ = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 232
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : List[Any] = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase_ : Dict = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
UpperCAmelCase_ : List[str] = '''▁'''
class _SCREAMING_SNAKE_CASE ( __snake_case ):
snake_case__ : Any = VOCAB_FILES_NAMES
snake_case__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Dict = ["""input_ids""", """attention_mask"""]
snake_case__ : str = BarthezTokenizer
def __init__( self : Any , __lowerCamelCase : Dict=None , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : str="<mask>" , **__lowerCamelCase : List[Any] , ):
UpperCamelCase :Tuple = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , **__UpperCamelCase , )
UpperCamelCase :Any = vocab_file
UpperCamelCase :Tuple = False if not self.vocab_file else True
def _A ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : Any = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase :Tuple = [self.cls_token_id]
UpperCamelCase :Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _A ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int = None ):
UpperCamelCase :Tuple = [self.sep_token_id]
UpperCamelCase :Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase :Optional[int] = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,)
| 702
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Any = """trocr"""
snake_case__ : str = ["""past_key_values"""]
snake_case__ : str = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self : List[str] , __lowerCamelCase : int=50_265 , __lowerCamelCase : Tuple=1_024 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : int=4_096 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : Any=0.0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=True , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : List[str]=0 , __lowerCamelCase : int=2 , **__lowerCamelCase : Dict , ):
UpperCamelCase :Optional[Any] = vocab_size
UpperCamelCase :str = d_model
UpperCamelCase :Dict = decoder_layers
UpperCamelCase :Tuple = decoder_attention_heads
UpperCamelCase :Tuple = decoder_ffn_dim
UpperCamelCase :List[Any] = activation_function
UpperCamelCase :Dict = max_position_embeddings
UpperCamelCase :Optional[Any] = dropout
UpperCamelCase :List[str] = attention_dropout
UpperCamelCase :int = activation_dropout
UpperCamelCase :List[str] = init_std
UpperCamelCase :int = decoder_layerdrop
UpperCamelCase :List[Any] = use_cache
UpperCamelCase :Optional[Any] = scale_embedding
UpperCamelCase :Any = use_learned_position_embeddings
UpperCamelCase :Tuple = layernorm_embedding
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , **__lowerCamelCase , )
| 590
| 0
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A=768 ):
super().__init__(_A )
__A : List[str] = proj_size
__A : List[Any] = CLIPVisionModel(_A )
__A : str = PaintByExampleMapper(_A )
__A : Tuple = nn.LayerNorm(config.hidden_size )
__A : Optional[int] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__A : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCAmelCase_ ( self , _A , _A=False ):
__A : Tuple = self.model(pixel_values=_A )
__A : Union[str, Any] = clip_output.pooler_output
__A : Optional[Any] = self.mapper(latent_states[:, None] )
__A : Dict = self.final_layer_norm(_A )
__A : str = self.proj_out(_A )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _A( nn.Module ):
"""simple docstring"""
def __init__( self , _A ):
super().__init__()
__A : Tuple = (config.num_hidden_layers + 1) // 5
__A : Optional[Any] = config.hidden_size
__A : Tuple = 1
__A : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(_A , _A , _A , activation_fn='gelu' , attention_bias=_A )
for _ in range(_A )
] )
def UpperCAmelCase_ ( self , _A ):
for block in self.blocks:
__A : Dict = block(_A )
return hidden_states
| 239
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : int = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 239
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: int ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase ( self: Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ = 1
UpperCamelCase_ = 3
UpperCamelCase_ = (32, 32)
UpperCamelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE )
return image
@property
def lowercase ( self: List[str] ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowercase ( self: Tuple ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_SCREAMING_SNAKE_CASE )
@property
def lowercase ( self: int ) -> List[Any]:
"""simple docstring"""
def extract(*_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Any ):
class _UpperCamelCase :
def __init__( self: List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = torch.ones([0] )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Dict:
"""simple docstring"""
self.pixel_values.to(_SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
def lowercase ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.dummy_cond_unet
UpperCamelCase_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = self.dummy_vae
UpperCamelCase_ = self.dummy_text_encoder
UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
UpperCamelCase_ = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "A painting of a squirrel eating a burger"
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCamelCase_ = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
UpperCamelCase_ = output.images
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCamelCase_ = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0]
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_ = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.dummy_cond_unet
UpperCamelCase_ = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.dummy_vae
UpperCamelCase_ = self.dummy_text_encoder
UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
UpperCamelCase_ = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "A painting of a squirrel eating a burger"
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCamelCase_ = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
UpperCamelCase_ = output.images
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCamelCase_ = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0]
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_ = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert isinstance(pipe.scheduler , _SCREAMING_SNAKE_CASE )
assert pipe.safety_checker is None
UpperCamelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowercase ( self: List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.dummy_cond_unet
UpperCamelCase_ = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.dummy_vae
UpperCamelCase_ = self.dummy_text_encoder
UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
UpperCamelCase_ = unet.half()
UpperCamelCase_ = vae.half()
UpperCamelCase_ = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase_ = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "A painting of a squirrel eating a burger"
UpperCamelCase_ = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
UpperCamelCase_ = 4003660346
UpperCamelCase_ = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self: Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "padme amidala taking a bath artwork, safe for work, no nudity"
UpperCamelCase_ = 2734971755
UpperCamelCase_ = 7
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
UpperCamelCase_ = 1044355234
UpperCamelCase_ = 12
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 371
|
_UpperCAmelCase = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
_UpperCAmelCase = {value: key for key, value in encode_dict.items()}
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
UpperCamelCase_ = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
if set(UpperCamelCase_ ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCamelCase_ = ""
for word in coded.split():
while len(UpperCamelCase_ ) != 0:
decoded += decode_dict[word[:5]]
UpperCamelCase_ = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 371
| 1
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
__snake_case : Any = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
__snake_case : int = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
__snake_case : Union[str, Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}),
},
}) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
__lowerCAmelCase : List[str] = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
__lowerCAmelCase : List[Any] = evaluate(dataset=A_ , predictions=A_)
return score
| 293
|
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _A ( _lowercase ) -> str:
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = tmp_path_factory.getbasetemp() / 'cache'
__UpperCamelCase = test_hf_cache_home / 'datasets'
__UpperCamelCase = test_hf_cache_home / 'metrics'
__UpperCamelCase = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_lowercase ) )
@pytest.fixture(autouse=_lowercase , scope='session' )
def _A ( ) -> Dict:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_lowercase )
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _lowercase )
@pytest.fixture
def _A ( _lowercase ) -> Any:
"""simple docstring"""
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _lowercase )
| 1
| 0
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase_ = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def SCREAMING_SNAKE_CASE ( ):
__a = Github(os.environ['GITHUB_TOKEN'] )
__a = g.get_repo('huggingface/diffusers' )
__a = repo.get_issues(state='open' )
for issue in open_issues:
__a = sorted(issue.get_comments() , key=lambda a_ : i.created_at , reverse=a_ )
__a = comments[0] if len(a_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 490
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE ( a_ : List[DatasetType] , a_ : Optional[List[float]] = None , a_ : Optional[int] = None , a_ : Optional[DatasetInfo] = None , a_ : Optional[NamedSplit] = None , a_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(a_ ):
if not isinstance(a_ , (Dataset, IterableDataset) ):
if isinstance(a_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(a_ )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(a_ ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(a_ ).__name__}." )
if i == 0:
__a , __a = (
(Dataset, IterableDataset) if isinstance(a_ , a_ ) else (IterableDataset, Dataset)
)
elif not isinstance(a_ , a_ ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
a_ , a_ , a_ , info=a_ , split=a_ , stopping_strategy=a_ )
else:
return _interleave_iterable_datasets(
a_ , a_ , a_ , info=a_ , split=a_ , stopping_strategy=a_ )
def SCREAMING_SNAKE_CASE ( a_ : List[DatasetType] , a_ : Optional[DatasetInfo] = None , a_ : Optional[NamedSplit] = None , a_ : int = 0 , ):
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(a_ ):
if not isinstance(a_ , (Dataset, IterableDataset) ):
if isinstance(a_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(a_ )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(a_ ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(a_ ).__name__}." )
if i == 0:
__a , __a = (
(Dataset, IterableDataset) if isinstance(a_ , a_ ) else (IterableDataset, Dataset)
)
elif not isinstance(a_ , a_ ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(a_ , info=a_ , split=a_ , axis=a_ )
else:
return _concatenate_iterable_datasets(a_ , info=a_ , split=a_ , axis=a_ )
| 490
| 1
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCAmelCase__ ( unittest.TestCase , __magic_name__ ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =load_tool('''text-classification''' )
self.tool.setup()
__A =load_tool('''text-classification''' , remote=lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(lowercase__ , '''positive''' )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(lowercase__ , '''positive''' )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(lowercase__ , '''positive''' )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(lowercase__ , '''positive''' )
| 184
|
def __lowerCAmelCase ( A , A , A , A ):
# Return True if there is node that has not iterated.
UpperCAmelCase_ = [False] * len(A )
UpperCAmelCase_ = []
queue.append(A )
UpperCAmelCase_ = True
while queue:
UpperCAmelCase_ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(A )
UpperCAmelCase_ = True
UpperCAmelCase_ = u
return visited[t]
def __lowerCAmelCase ( A , A , A ):
# This array is filled by BFS and to store path
UpperCAmelCase_ = [-1] * (len(A ))
UpperCAmelCase_ = 0
while bfs(A , A , A , A ):
UpperCAmelCase_ = float("Inf" )
UpperCAmelCase_ = sink
while s != source:
# Find the minimum value in select path
UpperCAmelCase_ = min(A , graph[parent[s]][s] )
UpperCAmelCase_ = parent[s]
max_flow += path_flow
UpperCAmelCase_ = sink
while v != source:
UpperCAmelCase_ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCAmelCase_ = parent[v]
return max_flow
_a: Any = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_a , _a: Optional[int] = 0, 5
print(ford_fulkerson(graph, source, sink))
| 162
| 0
|
'''simple docstring'''
import datasets
_lowerCAmelCase = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
_lowerCAmelCase = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
_lowerCAmelCase = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def _lowerCAmelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] ) ->str:
"""simple docstring"""
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
def snake_case_( self )-> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> Union[str, Any]:
return {"accuracy": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
| 318
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( lowercase : List[str] , lowercase : Dict , lowercase : int ) ->List[Any]:
"""simple docstring"""
lowercase__ = BertConfig.from_json_file(lowercase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowercase__ = BertForPreTraining(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 318
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase : List[Any] = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ['''OwlViTFeatureExtractor''']
__lowercase : str = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowercase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : List[str] = (DPMSolverSinglestepScheduler,)
__lowerCamelCase : int = (("num_inference_steps", 25),)
def _snake_case ( self , **_lowerCAmelCase ) -> Any:
_lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**_lowerCAmelCase )
return config
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> List[Any]:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase = sample, sample
for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> int:
pass
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
if scheduler is None:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = 50
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> str:
self.check_over_configs(thresholding=_lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , )
def _snake_case ( self ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
_lowerCAmelCase = self.full_loop(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers"
def _snake_case ( self ) -> Optional[Any]:
self.check_over_configs(lower_order_final=_lowerCAmelCase )
self.check_over_configs(lower_order_final=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _snake_case ( self ) -> str:
self.check_over_configs(variance_type=_lowerCAmelCase )
self.check_over_configs(variance_type="learned_range" )
def _snake_case ( self ) -> int:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.full_loop(use_karras_sigmas=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 18
| 0
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__lowerCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase :
UpperCAmelCase : str = field(
default=_lowercase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_lowercase )} )
UpperCAmelCase : str = field(
default=_lowercase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
UpperCAmelCase : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase : int = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
UpperCAmelCase : int = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
UpperCAmelCase : int = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
UpperCAmelCase : bool = field(
default=_lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCAmelCase : bool = field(
default=_lowercase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
UpperCAmelCase : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
UpperCAmelCase : int = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
UpperCAmelCase : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
UpperCAmelCase : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : List[str] = '''train'''
UpperCAmelCase : int = '''dev'''
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : SquadDataTrainingArguments
UpperCAmelCase : List[SquadFeatures]
UpperCAmelCase : Split
UpperCAmelCase : bool
def __init__(self : Union[str, Any] , A__ : SquadDataTrainingArguments , A__ : PreTrainedTokenizer , A__ : Optional[int] = None , A__ : Union[str, Split] = Split.train , A__ : Optional[bool] = False , A__ : Optional[str] = None , A__ : Optional[str] = "pt" , ) -> Any:
lowercase = args
lowercase = is_language_sensitive
lowercase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(A__ , A__ ):
try:
lowercase = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowercase = mode
# Load data features from cache or dataset file
lowercase = "v2" if args.version_2_with_negative else "v1"
lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase = cached_features_file + ".lock"
with FileLock(A__ ):
if os.path.exists(A__ ) and not args.overwrite_cache:
lowercase = time.time()
lowercase = torch.load(A__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowercase = self.old_features["features"]
lowercase = self.old_features.get("dataset" , A__ )
lowercase = self.old_features.get("examples" , A__ )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowercase = self.processor.get_dev_examples(args.data_dir )
else:
lowercase = self.processor.get_train_examples(args.data_dir )
lowercase , lowercase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=A__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A__ , )
lowercase = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , A__ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__(self : str ) -> Union[str, Any]:
return len(self.features )
def __getitem__(self : Optional[Any] , A__ : Union[str, Any] ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowercase = self.features[i]
lowercase = torch.tensor(feature.input_ids , dtype=torch.long )
lowercase = torch.tensor(feature.attention_mask , dtype=torch.long )
lowercase = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowercase = torch.tensor(feature.cls_index , dtype=torch.long )
lowercase = torch.tensor(feature.p_mask , dtype=torch.float )
lowercase = torch.tensor(feature.is_impossible , dtype=torch.float )
lowercase = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowercase = torch.tensor(feature.start_position , dtype=torch.long )
lowercase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 459
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : str = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : List[str] = '''data2vec-text'''
def __init__(self : List[str] , A__ : str=3_0_5_2_2 , A__ : Tuple=7_6_8 , A__ : Any=1_2 , A__ : Optional[int]=1_2 , A__ : str=3_0_7_2 , A__ : List[str]="gelu" , A__ : List[Any]=0.1 , A__ : Optional[int]=0.1 , A__ : Union[str, Any]=5_1_2 , A__ : Any=2 , A__ : str=0.0_2 , A__ : int=1e-12 , A__ : Union[str, Any]=1 , A__ : Optional[int]=0 , A__ : Union[str, Any]=2 , A__ : Optional[int]="absolute" , A__ : Tuple=True , A__ : int=None , **A__ : Any , ) -> Any:
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = use_cache
lowercase = classifier_dropout
class UpperCAmelCase ( _lowercase ):
@property
def UpperCAmelCase__ (self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 459
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
a = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
lowerCamelCase : Optional[str] = field(default=snake_case__ , metadata={'help': 'A folder containing the training data.'} )
lowerCamelCase : Optional[str] = field(default=snake_case__ , metadata={'help': 'A folder containing the validation data.'} )
lowerCamelCase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCamelCase : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
lowerCamelCase : float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = {}
if self.train_dir is not None:
_lowerCAmelCase :Tuple = self.train_dir
if self.validation_dir is not None:
_lowerCAmelCase :List[str] = self.validation_dir
_lowerCAmelCase :List[Any] = data_files if data_files else None
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str = field(
default=snake_case__ , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case__ )} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase : str = field(default=snake_case__ , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCamelCase : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'Stride to use for the encoder.'} , )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: str , _UpperCAmelCase: Optional[int]=192 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: Dict=4 , _UpperCAmelCase: int=0.6 ):
_lowerCAmelCase :Tuple = input_size
_lowerCAmelCase :Union[str, Any] = mask_patch_size
_lowerCAmelCase :Optional[Any] = model_patch_size
_lowerCAmelCase :str = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
_lowerCAmelCase :Any = self.input_size // self.mask_patch_size
_lowerCAmelCase :Optional[int] = self.mask_patch_size // self.model_patch_size
_lowerCAmelCase :List[str] = self.rand_size**2
_lowerCAmelCase :Optional[Any] = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self: str ):
_lowerCAmelCase :Tuple = np.random.permutation(self.token_count )[: self.mask_count]
_lowerCAmelCase :List[Any] = np.zeros(self.token_count , dtype=_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = 1
_lowerCAmelCase :Optional[int] = mask.reshape((self.rand_size, self.rand_size) )
_lowerCAmelCase :List[str] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def UpperCamelCase_( __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Any = torch.stack([example['pixel_values'] for example in examples] )
_lowerCAmelCase :Dict = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , __magic_name__ , __magic_name__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase :List[str] = training_args.get_process_log_level()
logger.setLevel(__magic_name__ )
transformers.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowerCAmelCase :int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase :int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
_lowerCAmelCase :int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_lowerCAmelCase :Optional[Any] = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __magic_name__ ) and data_args.train_val_split > 0.0:
_lowerCAmelCase :Dict = ds['train'].train_test_split(data_args.train_val_split )
_lowerCAmelCase :Dict = split['train']
_lowerCAmelCase :Optional[Any] = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase :str = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_lowerCAmelCase :Tuple = AutoConfig.from_pretrained(model_args.config_name_or_path , **__magic_name__ )
elif model_args.model_name_or_path:
_lowerCAmelCase :int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__magic_name__ )
else:
_lowerCAmelCase :Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(__magic_name__ , 'decoder_type' ):
_lowerCAmelCase :Tuple = 'simmim'
# adapt config
_lowerCAmelCase :Union[str, Any] = model_args.image_size if model_args.image_size is not None else config.image_size
_lowerCAmelCase :Tuple = model_args.patch_size if model_args.patch_size is not None else config.patch_size
_lowerCAmelCase :str = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
_lowerCAmelCase :Optional[Any] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__magic_name__ )
elif model_args.model_name_or_path:
_lowerCAmelCase :str = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__magic_name__ )
else:
_lowerCAmelCase :Union[str, Any] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_lowerCAmelCase :int = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_lowerCAmelCase :str = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_lowerCAmelCase :Any = AutoModelForMaskedImageModeling.from_config(__magic_name__ )
if training_args.do_train:
_lowerCAmelCase :Optional[Any] = ds['train'].column_names
else:
_lowerCAmelCase :str = ds['validation'].column_names
if data_args.image_column_name is not None:
_lowerCAmelCase :int = data_args.image_column_name
elif "image" in column_names:
_lowerCAmelCase :Union[str, Any] = 'image'
elif "img" in column_names:
_lowerCAmelCase :Optional[Any] = 'img'
else:
_lowerCAmelCase :Dict = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
_lowerCAmelCase :str = Compose(
[
Lambda(lambda __magic_name__ : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
_lowerCAmelCase :List[str] = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(__magic_name__ : List[str] ):
_lowerCAmelCase :Optional[Any] = [transforms(__magic_name__ ) for image in examples[image_column_name]]
_lowerCAmelCase :Dict = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
_lowerCAmelCase :Dict = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__magic_name__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
_lowerCAmelCase :Optional[Any] = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__magic_name__ )
# Initialize our trainer
_lowerCAmelCase :Any = Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
_lowerCAmelCase :Any = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase :List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase :List[Any] = last_checkpoint
_lowerCAmelCase :Optional[Any] = trainer.train(resume_from_checkpoint=__magic_name__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowerCAmelCase :Optional[int] = trainer.evaluate()
trainer.log_metrics('eval' , __magic_name__ )
trainer.save_metrics('eval' , __magic_name__ )
# Write model card and (optionally) push to hub
_lowerCAmelCase :List[str] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__magic_name__ )
else:
trainer.create_model_card(**__magic_name__ )
if __name__ == "__main__":
main()
| 687
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :str = [1, 2, 3, 4]
_lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Optional[int] = ''
_lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [] )
self.assertEqual(_UpperCAmelCase , [] )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Optional[int] = ['It was the best of times.']
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :List[str] = 101
_lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
| 687
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =get_failure_array(__lowerCamelCase )
# 2) Step through text searching for pattern
_UpperCAmelCase : Optional[int] =0, 0 # index into text, pattern
while i < len(__lowerCamelCase ):
if pattern[j] == text[i]:
if j == (len(__lowerCamelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCAmelCase : List[str] =failure[j - 1]
continue
i += 1
return False
def lowerCamelCase__ ( __lowerCamelCase : str ):
'''simple docstring'''
_UpperCAmelCase : List[str] =[0]
_UpperCAmelCase : Tuple =0
_UpperCAmelCase : int =1
while j < len(__lowerCamelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCAmelCase : Optional[Any] =failure[i - 1]
continue
j += 1
failure.append(__lowerCamelCase )
return failure
if __name__ == "__main__":
# Test 1)
lowercase ='abc1abc12'
lowercase ='alskfjaldsabc1abc1abc12k23adsfabcabc'
lowercase ='alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowercase ='ABABX'
lowercase ='ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
lowercase ='AAAB'
lowercase ='ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
lowercase ='abcdabcy'
lowercase ='abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
lowercase ='aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 718
|
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowercase =logging.getLogger(__name__)
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase : Any =np.argmax(__lowerCamelCase , axis=1 )
return np.sum(outputs == labels )
def lowerCamelCase__ ( __lowerCamelCase : List[Any] ):
'''simple docstring'''
with open(__lowerCamelCase , encoding='utf_8' ) as f:
_UpperCAmelCase : Union[str, Any] =csv.reader(__lowerCamelCase )
_UpperCAmelCase : Optional[int] =[]
next(__lowerCamelCase ) # skip the first line
for line in tqdm(__lowerCamelCase ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase : List[Any] =[]
for dataset in encoded_datasets:
_UpperCAmelCase : Any =len(__lowerCamelCase )
_UpperCAmelCase : Optional[Any] =np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
_UpperCAmelCase : Optional[int] =np.zeros((n_batch, 2) , dtype=np.intaa )
_UpperCAmelCase : Dict =np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
_UpperCAmelCase : str =np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(__lowerCamelCase ):
_UpperCAmelCase : int =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_UpperCAmelCase : str =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_UpperCAmelCase : Optional[int] =with_conta
_UpperCAmelCase : str =with_conta
_UpperCAmelCase : str =len(__lowerCamelCase ) - 1
_UpperCAmelCase : Optional[Any] =len(__lowerCamelCase ) - 1
_UpperCAmelCase : Any =with_conta
_UpperCAmelCase : Optional[Any] =with_conta
_UpperCAmelCase : str =mc_label
_UpperCAmelCase : Union[str, Any] =(input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(__lowerCamelCase ) for t in all_inputs ) )
return tensor_datasets
def lowerCamelCase__ ( ):
'''simple docstring'''
_UpperCAmelCase : str =argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__lowerCamelCase , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=__lowerCamelCase , default='' )
parser.add_argument('--eval_dataset' , type=__lowerCamelCase , default='' )
parser.add_argument('--seed' , type=__lowerCamelCase , default=4_2 )
parser.add_argument('--num_train_epochs' , type=__lowerCamelCase , default=3 )
parser.add_argument('--train_batch_size' , type=__lowerCamelCase , default=8 )
parser.add_argument('--eval_batch_size' , type=__lowerCamelCase , default=1_6 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=__lowerCamelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=__lowerCamelCase , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=__lowerCamelCase , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=__lowerCamelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=__lowerCamelCase , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=__lowerCamelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=__lowerCamelCase , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=__lowerCamelCase , default=0.01 )
parser.add_argument('--lm_coef' , type=__lowerCamelCase , default=0.9 )
parser.add_argument('--n_valid' , type=__lowerCamelCase , default=3_7_4 )
parser.add_argument('--server_ip' , type=__lowerCamelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCamelCase , default='' , help='Can be used for distant debugging.' )
_UpperCAmelCase : Optional[Any] =parser.parse_args()
print(__lowerCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_UpperCAmelCase : Any =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_UpperCAmelCase : Tuple =torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(__lowerCamelCase , __lowerCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_UpperCAmelCase : List[str] =['_start_', '_delimiter_', '_classify_']
_UpperCAmelCase : Any =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__lowerCamelCase )
_UpperCAmelCase : str =tokenizer.convert_tokens_to_ids(__lowerCamelCase )
_UpperCAmelCase : Optional[Any] =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__lowerCamelCase ) )
model.to(__lowerCamelCase )
# Load and encode the datasets
def tokenize_and_encode(__lowerCamelCase : Dict ):
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__lowerCamelCase ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return obj
return [tokenize_and_encode(__lowerCamelCase ) for o in obj]
logger.info('Encoding dataset...' )
_UpperCAmelCase : int =load_rocstories_dataset(args.train_dataset )
_UpperCAmelCase : Dict =load_rocstories_dataset(args.eval_dataset )
_UpperCAmelCase : Optional[Any] =(train_dataset, eval_dataset)
_UpperCAmelCase : Dict =tokenize_and_encode(__lowerCamelCase )
# Compute the max input length for the Transformer
_UpperCAmelCase : int =model.config.n_positions // 2 - 2
_UpperCAmelCase : str =max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_UpperCAmelCase : Tuple =min(__lowerCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_UpperCAmelCase : Any =pre_process_datasets(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , *__lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase : str =tensor_datasets[0], tensor_datasets[1]
_UpperCAmelCase : int =TensorDataset(*__lowerCamelCase )
_UpperCAmelCase : Optional[int] =RandomSampler(__lowerCamelCase )
_UpperCAmelCase : Optional[Any] =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.train_batch_size )
_UpperCAmelCase : Dict =TensorDataset(*__lowerCamelCase )
_UpperCAmelCase : str =SequentialSampler(__lowerCamelCase )
_UpperCAmelCase : int =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_UpperCAmelCase : List[Any] =args.max_steps
_UpperCAmelCase : Tuple =args.max_steps // (len(__lowerCamelCase ) // args.gradient_accumulation_steps) + 1
else:
_UpperCAmelCase : int =len(__lowerCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
_UpperCAmelCase : int =list(model.named_parameters() )
_UpperCAmelCase : Optional[int] =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
_UpperCAmelCase : int =[
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
_UpperCAmelCase : Optional[Any] =AdamW(__lowerCamelCase , lr=args.learning_rate , eps=args.adam_epsilon )
_UpperCAmelCase : Dict =get_linear_schedule_with_warmup(
__lowerCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=__lowerCamelCase )
if args.do_train:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
_UpperCAmelCase : Union[str, Any] =0
_UpperCAmelCase : Tuple =0
_UpperCAmelCase : Any =tqdm(__lowerCamelCase , desc='Training' )
for step, batch in enumerate(__lowerCamelCase ):
_UpperCAmelCase : List[str] =tuple(t.to(__lowerCamelCase ) for t in batch )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] =batch
_UpperCAmelCase : List[str] =model(__lowerCamelCase , mc_token_ids=__lowerCamelCase , lm_labels=__lowerCamelCase , mc_labels=__lowerCamelCase )
_UpperCAmelCase : List[str] =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_UpperCAmelCase : Union[str, Any] =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_UpperCAmelCase : str ='Training loss: {:.2e} lr: {:.2e}'.format(__lowerCamelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_UpperCAmelCase : Dict =model.module if hasattr(__lowerCamelCase , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_UpperCAmelCase : List[str] =os.path.join(args.output_dir , __lowerCamelCase )
_UpperCAmelCase : Optional[int] =os.path.join(args.output_dir , __lowerCamelCase )
torch.save(model_to_save.state_dict() , __lowerCamelCase )
model_to_save.config.to_json_file(__lowerCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_UpperCAmelCase : int =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_UpperCAmelCase : Optional[int] =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__lowerCamelCase )
if args.do_eval:
model.eval()
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] =0, 0
_UpperCAmelCase , _UpperCAmelCase : Dict =0, 0
for batch in tqdm(__lowerCamelCase , desc='Evaluating' ):
_UpperCAmelCase : str =tuple(t.to(__lowerCamelCase ) for t in batch )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict =batch
with torch.no_grad():
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict =model(
__lowerCamelCase , mc_token_ids=__lowerCamelCase , lm_labels=__lowerCamelCase , mc_labels=__lowerCamelCase )
_UpperCAmelCase : str =mc_logits.detach().cpu().numpy()
_UpperCAmelCase : Optional[Any] =mc_labels.to('cpu' ).numpy()
_UpperCAmelCase : Tuple =accuracy(__lowerCamelCase , __lowerCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_UpperCAmelCase : List[Any] =eval_loss / nb_eval_steps
_UpperCAmelCase : Union[str, Any] =eval_accuracy / nb_eval_examples
_UpperCAmelCase : Optional[Any] =tr_loss / nb_tr_steps if args.do_train else None
_UpperCAmelCase : Optional[Any] ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
_UpperCAmelCase : Optional[Any] =os.path.join(args.output_dir , 'eval_results.txt' )
with open(__lowerCamelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __lowerCamelCase , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 331
| 0
|
from collections import Counter
from timeit import timeit
def _SCREAMING_SNAKE_CASE ( lowercase : str = "" , ):
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def _SCREAMING_SNAKE_CASE ( lowercase : str = "" ):
'''simple docstring'''
if len(lowercase ) == 0:
return True
lowerCamelCase_ = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowerCamelCase_ = {}
for character in lower_case_input_str:
lowerCamelCase_ = character_freq_dict.get(lowercase , 0 ) + 1
lowerCamelCase_ = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _SCREAMING_SNAKE_CASE ( lowercase : str = "" ):
'''simple docstring'''
print('\nFor string = ' , lowercase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(lowercase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(lowercase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
lowerCamelCase : int = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 70
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : str = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 578
| 0
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 32 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , UpperCamelCase__ : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Any=7 , UpperCamelCase__ : List[Any]=30 , UpperCamelCase__ : int=400 , UpperCamelCase__ : List[Any]=3 , ) -> str:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = do_resize
__magic_name__ = size if size is not None else {"""shortest_edge""": 288}
__magic_name__ = size_divisor
__magic_name__ = do_rescale
__magic_name__ = rescale_factor
__magic_name__ = do_normalize
__magic_name__ = do_center_crop
__magic_name__ = image_mean
__magic_name__ = image_std
__magic_name__ = do_pad
__magic_name__ = batch_size
__magic_name__ = num_channels
__magic_name__ = min_resolution
__magic_name__ = max_resolution
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=False ) -> Any:
"""simple docstring"""
if not batched:
__magic_name__ = self.size["""shortest_edge"""]
__magic_name__ = image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
__magic_name__ , __magic_name__ = image.size
else:
__magic_name__ , __magic_name__ = image.shape[1], image.shape[2]
__magic_name__ = size / min(UpperCamelCase__ , UpperCamelCase__ )
if h < w:
__magic_name__ , __magic_name__ = size, scale * w
else:
__magic_name__ , __magic_name__ = scale * h, size
__magic_name__ = int((1333 / 800) * size )
if max(UpperCamelCase__ , UpperCamelCase__ ) > max_size:
__magic_name__ = max_size / max(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = newh * scale
__magic_name__ = neww * scale
__magic_name__ , __magic_name__ = int(newh + 0.5 ), int(neww + 0.5 )
__magic_name__ , __magic_name__ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__magic_name__ = []
for image in image_inputs:
__magic_name__ , __magic_name__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[0] )[0]
__magic_name__ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_ ( _A , unittest.TestCase ):
'''simple docstring'''
a__ = BridgeTowerImageProcessor if is_vision_available() else None
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
__magic_name__ = BridgeTowerImageProcessingTester(self )
@property
def _lowercase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size_divisor""" ) )
def _lowercase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def _lowercase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : str ) -> str:
"""simple docstring"""
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 76
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = botoa.client("""iam""" )
__magic_name__ = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) )
__magic_name__ = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = botoa.client("""iam""" )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def a__ ( ):
'''simple docstring'''
__magic_name__ = _ask_options(
"""How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, )
__magic_name__ = None
if credentials_configuration == 0:
__magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" )
__magic_name__ = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__magic_name__ = _ask_field("""AWS Access Key ID: """ )
__magic_name__ = aws_access_key_id
__magic_name__ = _ask_field("""AWS Secret Access Key: """ )
__magic_name__ = aws_secret_access_key
__magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" )
__magic_name__ = aws_region
__magic_name__ = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, )
if role_management == 0:
__magic_name__ = _ask_field("""Enter your IAM role name: """ )
else:
__magic_name__ = """accelerate_sagemaker_execution_role"""
print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__magic_name__ = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = None
if is_custom_docker_image:
__magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() )
__magic_name__ = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = None
if is_sagemaker_inputs_enabled:
__magic_name__ = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), )
__magic_name__ = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = None
if is_sagemaker_metrics_enabled:
__magic_name__ = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), )
__magic_name__ = _ask_options(
"""What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, )
__magic_name__ = {}
__magic_name__ = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
if use_dynamo:
__magic_name__ = """dynamo_"""
__magic_name__ = _ask_options(
"""Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
__magic_name__ = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
if use_custom_options:
__magic_name__ = _ask_options(
"""Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", )
__magic_name__ = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__magic_name__ = _ask_options(
A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" )
__magic_name__ = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__magic_name__ = _ask_field(
"""How many machines do you want use? [1]: """, A_, default=1, )
__magic_name__ = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
| 76
| 1
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCAmelCase : Union[str, Any] = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _lowerCAmelCase ( cls ):
"""simple docstring"""
lowerCamelCase = TOKEN
HfFolder.save_token(_a )
@classmethod
def _lowerCAmelCase ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCamelCase = FlaxBertModel(_a )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
lowerCamelCase = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' )
lowerCamelCase = flatten_dict(unfreeze(model.params ) )
lowerCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1e-3 , msg=f'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id="""test-model-flax""" , push_to_hub=_a , use_auth_token=self._token )
lowerCamelCase = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' )
lowerCamelCase = flatten_dict(unfreeze(model.params ) )
lowerCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1e-3 , msg=f'{key} not identical' )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCamelCase = FlaxBertModel(_a )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
lowerCamelCase = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
lowerCamelCase = flatten_dict(unfreeze(model.params ) )
lowerCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1e-3 , msg=f'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=_a , use_auth_token=self._token )
lowerCamelCase = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
lowerCamelCase = flatten_dict(unfreeze(model.params ) )
lowerCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1e-3 , msg=f'{key} not identical' )
def a__ ( snake_case__ , snake_case__ ) -> int:
lowerCamelCase = True
lowerCamelCase = flatten_dict(modela.params )
lowerCamelCase = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
lowerCamelCase = False
return models_are_equal
@require_flax
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
lowerCamelCase = FlaxBertModel(_a )
lowerCamelCase = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
lowerCamelCase = FlaxBertModel.from_pretrained(_a )
lowerCamelCase = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
lowerCamelCase = FlaxBertModel(_a )
lowerCamelCase = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size="""10KB""" )
with self.assertRaises(_a ):
lowerCamelCase = FlaxBertModel.from_pretrained(_a )
lowerCamelCase = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = """bert"""
lowerCamelCase = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(_a ):
lowerCamelCase = FlaxBertModel.from_pretrained(_a )
lowerCamelCase = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = """bert"""
lowerCamelCase = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(_a ):
lowerCamelCase = FlaxBertModel.from_pretrained(_a )
lowerCamelCase = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
| 543
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a__ ( snake_case__ ) -> Dict[str, torch.Tensor]:
lowerCamelCase = []
lowerCamelCase = []
lowerCamelCase = []
for rt in rc.restypes:
lowerCamelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowerCamelCase = {name: i for i, name in enumerate(snake_case__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.floataa , device=protein["""aatype"""].device , )
lowerCamelCase = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowerCamelCase = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase = restype_atomaa_mask[protein_aatype]
lowerCamelCase = residx_atomaa_mask
lowerCamelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowerCamelCase = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowerCamelCase = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowerCamelCase = rc.restype_atoa[restype_letter]
lowerCamelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowerCamelCase = rc.atom_order[atom_name]
lowerCamelCase = 1
lowerCamelCase = restype_atomaa_mask[protein_aatype]
lowerCamelCase = residx_atomaa_mask
return protein
def a__ ( snake_case__ ) -> Dict[str, np.ndarray]:
lowerCamelCase = tree_map(lambda snake_case__ : torch.tensor(snake_case__ , device=batch["""aatype"""].device ) , snake_case__ , np.ndarray )
lowerCamelCase = tensor_tree_map(lambda snake_case__ : np.array(snake_case__ ) , make_atomaa_masks(snake_case__ ) )
return out
| 543
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {'vocab_file': 'spiece.model'}
snake_case__ : Optional[int] = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : str=False , __a : Optional[Any]=True , __a : List[Any]=False , __a : Dict="<s>" , __a : Any="</s>" , __a : Union[str, Any]="<unk>" , __a : Union[str, Any]="<sep>" , __a : int="<pad>" , __a : List[str]="<cls>" , __a : Dict="<mask>" , __a : str=["<eop>", "<eod>"] , __a : Optional[Dict[str, Any]] = None , **__a : Optional[Any] , ) ->None:
lowerCamelCase_ : str = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
lowerCamelCase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
lowerCamelCase_ : Tuple = 3
lowerCamelCase_ : List[str] = do_lower_case
lowerCamelCase_ : int = remove_space
lowerCamelCase_ : Dict = keep_accents
lowerCamelCase_ : List[Any] = vocab_file
lowerCamelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowerCamelCase_ : List[str] = jieba
lowerCamelCase_ : Dict = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCAmelCase ( self : Any ) ->Union[str, Any]:
return len(self.sp_model )
def _lowerCAmelCase ( self : List[Any] ) ->Tuple:
lowerCamelCase_ : Union[str, Any] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) ->List[str]:
lowerCamelCase_ : Optional[int] = self.__dict__.copy()
lowerCamelCase_ : int = None
return state
def __setstate__( self : Any , __a : Any ) ->int:
lowerCamelCase_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase_ : int = {}
lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self : List[str] , __a : Any ) ->Optional[Any]:
if self.remove_space:
lowerCamelCase_ : Union[str, Any] = """ """.join(inputs.strip().split() )
else:
lowerCamelCase_ : Optional[int] = inputs
lowerCamelCase_ : List[str] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowerCamelCase_ : Any = unicodedata.normalize("""NFKD""" , __a )
lowerCamelCase_ : Any = """""".join([c for c in outputs if not unicodedata.combining(__a )] )
if self.do_lower_case:
lowerCamelCase_ : int = outputs.lower()
return outputs
def _lowerCAmelCase ( self : Union[str, Any] , __a : str ) ->List[str]:
lowerCamelCase_ : List[str] = self.preprocess_text(__a )
lowerCamelCase_ : Any = self.sp_model.encode(__a , out_type=__a )
lowerCamelCase_ : Dict = []
for piece in pieces:
if len(__a ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase_ : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(__a , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase_ : Optional[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__a )
else:
new_pieces.append(__a )
return new_pieces
def _lowerCAmelCase ( self : Optional[Any] , __a : Dict ) ->Optional[Any]:
return self.sp_model.PieceToId(__a )
def _lowerCAmelCase ( self : List[str] , __a : Any ) ->Tuple:
return self.sp_model.IdToPiece(__a )
def _lowerCAmelCase ( self : Any , __a : str ) ->Dict:
lowerCamelCase_ : Tuple = """""".join(__a ).replace(__a , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self : Any , __a : List[int] , __a : Optional[List[int]] = None ) ->List[int]:
lowerCamelCase_ : Tuple = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self : int , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is not None:
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1, 1]
return ([0] * len(__a )) + [1, 1]
def _lowerCAmelCase ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ) ->List[int]:
lowerCamelCase_ : List[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self : str , __a : str , __a : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ : int = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , """wb""" ) as fi:
lowerCamelCase_ : Any = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def _lowerCAmelCase ( self : Optional[int] , *__a : Dict , **__a : str ) ->str:
lowerCamelCase_ : Tuple = super()._decode(*__a , **__a )
lowerCamelCase_ : List[str] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 171
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = ""
_a = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : Any , __a : Optional[DatasetInfo] = None , __a : Optional[str] = None , **__a : Any , ) ->Any:
super().__init__(self , **__a )
lowerCamelCase_ : Tuple = repo_info
lowerCamelCase_ : Any = token
lowerCamelCase_ : Any = None
def _lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
if self.dir_cache is None:
lowerCamelCase_ : Dict = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCamelCase_ : int = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__a ): {"""name""": str(__a ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _lowerCAmelCase ( self : int , __a : str , __a : str = "rb" , **__a : Optional[Any] , ) ->Dict:
if not isinstance(self.repo_info , __a ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
lowerCamelCase_ : int = hf_hub_url(self.repo_info.id , __a , revision=self.repo_info.sha )
return fsspec.open(
__a , mode=__a , headers=get_authentication_headers_for_url(__a , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _lowerCAmelCase ( self : Dict , __a : str , **__a : List[Any] ) ->List[Any]:
self._get_dirs()
lowerCamelCase_ : Tuple = self._strip_protocol(__a )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__a )
def _lowerCAmelCase ( self : Any , __a : Optional[Any] , __a : str=False , **__a : List[str] ) ->List[Any]:
self._get_dirs()
lowerCamelCase_ : Optional[Any] = PurePosixPath(path.strip("""/""" ) )
lowerCamelCase_ : Dict = {}
for p, f in self.dir_cache.items():
lowerCamelCase_ : str = PurePosixPath(p.strip("""/""" ) )
lowerCamelCase_ : Dict = p.parent
if root == path:
lowerCamelCase_ : int = f
lowerCamelCase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 171
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
def a_ ( self , lowercase_ , lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = jnp.ones((batch_size, length) ) / length
return scores
def a_ ( self ) -> str:
UpperCAmelCase = None
UpperCAmelCase = 2_0
UpperCAmelCase = self._get_uniform_logits(batch_size=2 , length=_lowercase )
# tweak scores to not be uniform anymore
UpperCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase = jax.nn.softmax(_lowercase , axis=-1 )
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(_lowercase , scores.copy() , cur_len=_lowercase ) , axis=-1 )
UpperCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(_lowercase , scores.copy() , cur_len=_lowercase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def a_ ( self ) -> List[Any]:
UpperCAmelCase = None
UpperCAmelCase = 1_0
UpperCAmelCase = 2
# create ramp distribution
UpperCAmelCase = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase = 5
UpperCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase = top_k_warp_safety_check(_lowercase , _lowercase , cur_len=_lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def a_ ( self ) -> Dict:
UpperCAmelCase = None
UpperCAmelCase = 1_0
UpperCAmelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase = np.exp(top_p_warp(_lowercase , _lowercase , cur_len=_lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
UpperCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = 2_0
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowercase )
# check that min length is applied at length 5
UpperCAmelCase = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
UpperCAmelCase = 5
UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
UpperCAmelCase = min_dist_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
UpperCAmelCase = 1_5
UpperCAmelCase = min_dist_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def a_ ( self ) -> List[Any]:
UpperCAmelCase = 2_0
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase = ids_tensor((batch_size, 1) , vocab_size=2_0 )
UpperCAmelCase = 1
UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
UpperCAmelCase = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase = 3
UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
UpperCAmelCase = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = 2_0
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = 5
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase = ids_tensor((batch_size, 4) , vocab_size=2_0 )
UpperCAmelCase = 4
UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
UpperCAmelCase = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase = 3
UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
UpperCAmelCase = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = 4
UpperCAmelCase = 1_0
UpperCAmelCase = 1_5
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 1_5
# dummy input_ids and scores
UpperCAmelCase = ids_tensor((batch_size, sequence_length) , _lowercase )
UpperCAmelCase = input_ids.copy()
UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
UpperCAmelCase = scores.copy()
# instantiate all dist processors
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowercase )
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
UpperCAmelCase = 1_0
# no processor list
UpperCAmelCase = temp_dist_warp(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = min_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = bos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = eos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
# with processor list
UpperCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase = processor(_lowercase , _lowercase , cur_len=_lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def a_ ( self ) -> Dict:
UpperCAmelCase = 4
UpperCAmelCase = 1_0
UpperCAmelCase = 1_5
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 1_5
# dummy input_ids and scores
UpperCAmelCase = ids_tensor((batch_size, sequence_length) , _lowercase )
UpperCAmelCase = input_ids.copy()
UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
UpperCAmelCase = scores.copy()
# instantiate all dist processors
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowercase )
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
UpperCAmelCase = 1_0
# no processor list
def run_no_processor_list(lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = temp_dist_warp(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = min_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = bos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
UpperCAmelCase = eos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
return scores
# with processor list
def run_processor_list(lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase = processor(_lowercase , _lowercase , cur_len=_lowercase )
return scores
UpperCAmelCase = jax.jit(_lowercase )
UpperCAmelCase = jax.jit(_lowercase )
UpperCAmelCase = jitted_run_no_processor_list(_lowercase , _lowercase , _lowercase )
UpperCAmelCase = jitted_run_processor_list(_lowercase , _lowercase , _lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 373
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : int=2 , _lowercase : Optional[Any]=3 , _lowercase : Any=64 , _lowercase : Tuple=None ):
A = np.random.default_rng(_lowercase )
A = length
A = rng.normal(size=(length,) ).astype(np.floataa )
A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
return self.length
def __getitem__( self : List[str] , _lowercase : int ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : Any=0 , _lowercase : List[Any]=0 , _lowercase : Optional[int]=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = True
def __a ( self : Optional[Any] , _lowercase : str=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a[0] + self.b[0]
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[Any] , _lowercase : Any=0 , _lowercase : List[str]=0 , _lowercase : str=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = True
def __a ( self : int , _lowercase : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a + self.b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
A = load_dataset('csv' , data_files=UpperCamelCase__ )
A = datasets['train'].unique('label' )
A = {v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
if "label" in examples:
A = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 )
A = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
| 690
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ :Union[str, Any] = logging.get_logger(__name__)
a_ :Dict = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = '''xglm'''
lowerCamelCase : int = ['''past_key_values''']
lowerCamelCase : Tuple = {
'''num_attention_heads''': '''attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Any , _lowercase : Tuple=25_60_08 , _lowercase : List[Any]=20_48 , _lowercase : Optional[int]=10_24 , _lowercase : Tuple=40_96 , _lowercase : int=24 , _lowercase : Optional[Any]=16 , _lowercase : Any="gelu" , _lowercase : Tuple=0.1 , _lowercase : str=0.1 , _lowercase : str=0.0 , _lowercase : Tuple=0.0 , _lowercase : Any=0.02 , _lowercase : Optional[int]=True , _lowercase : int=True , _lowercase : Optional[Any]=2 , _lowercase : Any=1 , _lowercase : Union[str, Any]=0 , _lowercase : int=2 , **_lowercase : List[str] , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Any = ffn_dim
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = activation_function
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout
SCREAMING_SNAKE_CASE__ : int = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : str = layerdrop
SCREAMING_SNAKE_CASE__ : List[Any] = init_std
SCREAMING_SNAKE_CASE__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : Any = use_cache
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 719
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
a_ :Tuple = logging.get_logger(__name__)
a_ :List[str] = 'Hello world! cécé herlolip'
def a ( A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(A__ )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , A__ )
SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : List[str] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads['''mnli'''].dense.weight
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads['''mnli'''].dense.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.classification_heads['''mnli'''].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : List[str] = model(A__ )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.classification_heads['''mnli'''](roberta.extract_features(A__ ) )
else:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model(A__ )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.allclose(A__ , A__ , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
a_ :str = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 250
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase_ ( A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : str = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
lowerCAmelCase_ : Tuple = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(A__ ).content
if __name__ == "__main__":
__A : Dict = input("Enter Video/IGTV url: ").strip()
__A : Any = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 275
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 275
| 1
|
import math
UpperCAmelCase__ : Any =10
UpperCAmelCase__ : Optional[Any] =7
UpperCAmelCase__ : List[str] =BALLS_PER_COLOUR * NUM_COLOURS
def _lowercase ( _UpperCAmelCase = 20 ) -> str:
lowerCamelCase =math.comb(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase =math.comb(NUM_BALLS - BALLS_PER_COLOUR , _UpperCAmelCase )
lowerCamelCase =NUM_COLOURS * (1 - missing_colour / total)
return F"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
| 269
|
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(_UpperCAmelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 269
| 1
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : Optional[Any] ) -> Union[str, Any]:
# Load checkpoint
__A : Union[str, Any] = torch.load(a__ ,map_location="""cpu""" )
__A : Optional[Any] = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
__A : Optional[int] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__A : Tuple = v
else:
__A : Dict = v
__A : Optional[Any] = chkpt["""params"""]
__A : Tuple = {n: v for n, v in config.items() if not isinstance(a__ ,(torch.FloatTensor, numpy.ndarray) )}
__A : List[Any] = chkpt["""dico_word2id"""]
__A : Tuple = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" ,"""""" ): i for s, i in vocab.items()}
# Save pytorch-model
__A : Any = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__A : Union[str, Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
__A : Optional[Any] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(a__ ,a__ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(a__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(a__ ,indent=2 ) + """\n""" )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(a__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(a__ ,indent=2 ) + """\n""" )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : int = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 17
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 278
| 0
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase=3 , UpperCAmelCase=3 , UpperCAmelCase=("DownEncoderBlock2D",) , UpperCAmelCase=(64,) , UpperCAmelCase=2 , UpperCAmelCase=32 , UpperCAmelCase="silu" , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
super().__init__()
lowercase_ = layers_per_block
lowercase_ = torch.nn.Convad(
UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase_ = None
lowercase_ = nn.ModuleList([] )
# down
lowercase_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCAmelCase ):
lowercase_ = output_channel
lowercase_ = block_out_channels[i]
lowercase_ = i == len(UpperCAmelCase ) - 1
lowercase_ = get_down_block(
UpperCAmelCase , num_layers=self.layers_per_block , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCAmelCase , resnet_groups=UpperCAmelCase , attention_head_dim=UpperCAmelCase , temb_channels=UpperCAmelCase , )
self.down_blocks.append(UpperCAmelCase )
# mid
lowercase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase , temb_channels=UpperCAmelCase , )
# out
lowercase_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCAmelCase , eps=1e-6 )
lowercase_ = nn.SiLU()
lowercase_ = 2 * out_channels if double_z else out_channels
lowercase_ = nn.Convad(block_out_channels[-1] , UpperCAmelCase , 3 , padding=1 )
lowercase_ = False
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = x
lowercase_ = self.conv_in(UpperCAmelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase ):
def custom_forward(*UpperCAmelCase ):
return module(*UpperCAmelCase )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase ) , UpperCAmelCase , use_reentrant=UpperCAmelCase )
# middle
lowercase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase , use_reentrant=UpperCAmelCase )
else:
for down_block in self.down_blocks:
lowercase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase ) , UpperCAmelCase )
# middle
lowercase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCAmelCase )
else:
# down
for down_block in self.down_blocks:
lowercase_ = down_block(UpperCAmelCase )
# middle
lowercase_ = self.mid_block(UpperCAmelCase )
# post-process
lowercase_ = self.conv_norm_out(UpperCAmelCase )
lowercase_ = self.conv_act(UpperCAmelCase )
lowercase_ = self.conv_out(UpperCAmelCase )
return sample
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase=3 , UpperCAmelCase=3 , UpperCAmelCase=("UpDecoderBlock2D",) , UpperCAmelCase=(64,) , UpperCAmelCase=2 , UpperCAmelCase=32 , UpperCAmelCase="silu" , UpperCAmelCase="group" , ) -> int:
'''simple docstring'''
super().__init__()
lowercase_ = layers_per_block
lowercase_ = nn.Convad(
UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase_ = None
lowercase_ = nn.ModuleList([] )
lowercase_ = in_channels if norm_type == "spatial" else None
# mid
lowercase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase , temb_channels=UpperCAmelCase , )
# up
lowercase_ = list(reversed(UpperCAmelCase ) )
lowercase_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase ):
lowercase_ = output_channel
lowercase_ = reversed_block_out_channels[i]
lowercase_ = i == len(UpperCAmelCase ) - 1
lowercase_ = get_up_block(
UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase , resnet_groups=UpperCAmelCase , attention_head_dim=UpperCAmelCase , temb_channels=UpperCAmelCase , resnet_time_scale_shift=UpperCAmelCase , )
self.up_blocks.append(UpperCAmelCase )
lowercase_ = output_channel
# out
if norm_type == "spatial":
lowercase_ = SpatialNorm(block_out_channels[0] , UpperCAmelCase )
else:
lowercase_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCAmelCase , eps=1e-6 )
lowercase_ = nn.SiLU()
lowercase_ = nn.Convad(block_out_channels[0] , UpperCAmelCase , 3 , padding=1 )
lowercase_ = False
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Dict:
'''simple docstring'''
lowercase_ = z
lowercase_ = self.conv_in(UpperCAmelCase )
lowercase_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase ):
def custom_forward(*UpperCAmelCase ):
return module(*UpperCAmelCase )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase , UpperCAmelCase , use_reentrant=UpperCAmelCase )
lowercase_ = sample.to(UpperCAmelCase )
# up
for up_block in self.up_blocks:
lowercase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , use_reentrant=UpperCAmelCase )
else:
# middle
lowercase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase , UpperCAmelCase )
lowercase_ = sample.to(UpperCAmelCase )
# up
for up_block in self.up_blocks:
lowercase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase )
else:
# middle
lowercase_ = self.mid_block(UpperCAmelCase , UpperCAmelCase )
lowercase_ = sample.to(UpperCAmelCase )
# up
for up_block in self.up_blocks:
lowercase_ = up_block(UpperCAmelCase , UpperCAmelCase )
# post-process
if latent_embeds is None:
lowercase_ = self.conv_norm_out(UpperCAmelCase )
else:
lowercase_ = self.conv_norm_out(UpperCAmelCase , UpperCAmelCase )
lowercase_ = self.conv_act(UpperCAmelCase )
lowercase_ = self.conv_out(UpperCAmelCase )
return sample
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase="random" , UpperCAmelCase=False , UpperCAmelCase=True ) -> str:
'''simple docstring'''
super().__init__()
lowercase_ = n_e
lowercase_ = vq_embed_dim
lowercase_ = beta
lowercase_ = legacy
lowercase_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase_ = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase_ = self.used.shape[0]
lowercase_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase_ = self.re_embed
lowercase_ = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
lowercase_ = n_e
lowercase_ = sane_index_shape
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = inds.shape
assert len(UpperCAmelCase ) > 1
lowercase_ = inds.reshape(ishape[0] , -1 )
lowercase_ = self.used.to(UpperCAmelCase )
lowercase_ = (inds[:, :, None] == used[None, None, ...]).long()
lowercase_ = match.argmax(-1 )
lowercase_ = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase_ = self.unknown_index
return new.reshape(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = inds.shape
assert len(UpperCAmelCase ) > 1
lowercase_ = inds.reshape(ishape[0] , -1 )
lowercase_ = self.used.to(UpperCAmelCase )
if self.re_embed > self.used.shape[0]: # extra token
lowercase_ = 0 # simply set to zero
lowercase_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCAmelCase )
return back.reshape(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase_ = torch.argmin(torch.cdist(UpperCAmelCase , self.embedding.weight ) , dim=1 )
lowercase_ = self.embedding(UpperCAmelCase ).view(z.shape )
lowercase_ = None
lowercase_ = None
# compute loss for embedding
if not self.legacy:
lowercase_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase_ = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase_ = self.remap_to_used(UpperCAmelCase )
lowercase_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
if self.remap is not None:
lowercase_ = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase_ = self.unmap_to_all(UpperCAmelCase )
lowercase_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase_ = self.embedding(UpperCAmelCase )
if shape is not None:
lowercase_ = z_q.view(UpperCAmelCase )
# reshape back to match original input shape
lowercase_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
lowercase_ = parameters
lowercase_ , lowercase_ = torch.chunk(UpperCAmelCase , 2 , dim=1 )
lowercase_ = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase_ = deterministic
lowercase_ = torch.exp(0.5 * self.logvar )
lowercase_ = torch.exp(self.logvar )
if self.deterministic:
lowercase_ = lowercase_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def A__ ( self , UpperCAmelCase = None ) -> torch.FloatTensor:
'''simple docstring'''
lowercase_ = randn_tensor(
self.mean.shape , generator=UpperCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase_ = self.mean + self.std * sample
return x
def A__ ( self , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=[1, 2, 3] ) -> Tuple:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
lowercase_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.mean
| 712
|
import os
# Precomputes a list of the 100 first triangular numbers
SCREAMING_SNAKE_CASE__ = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = os.path.dirname(os.path.realpath(__lowerCamelCase ) )
lowercase_ = os.path.join(__lowerCamelCase , "words.txt" )
lowercase_ = ""
with open(__lowerCamelCase ) as f:
lowercase_ = f.readline()
lowercase_ = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
lowercase_ = [
word
for word in [sum(ord(__lowerCamelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 601
| 0
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCamelCase = 16
__lowerCamelCase = 32
def a ( __UpperCAmelCase : Accelerator , __UpperCAmelCase : int = 1_6 , __UpperCAmelCase : str = "bert-base-cased" ) -> str:
__magic_name__: Tuple = AutoTokenizer.from_pretrained(__UpperCAmelCase )
__magic_name__: Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCAmelCase : Dict ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__: Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__magic_name__: List[Any] = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__UpperCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__: int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCAmelCase : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__magic_name__: Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
__magic_name__: Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
def a ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Any:
model.eval()
__magic_name__: List[str] = 0
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__: Union[str, Any] = model(**__UpperCAmelCase )
__magic_name__: Optional[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__magic_name__, __magic_name__: str = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCAmelCase ) - 1:
__magic_name__: List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__magic_name__: Dict = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCAmelCase , references=__UpperCAmelCase , )
__magic_name__: Any = metric.compute()
return eval_metric["accuracy"]
def a ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ) -> Optional[Any]:
# Initialize accelerator
__magic_name__: List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__: Union[str, Any] = config["""lr"""]
__magic_name__: Dict = int(config["""num_epochs"""] )
__magic_name__: str = int(config["""seed"""] )
__magic_name__: Union[str, Any] = int(config["""batch_size"""] )
__magic_name__: Tuple = args.model_name_or_path
set_seed(__UpperCAmelCase )
__magic_name__, __magic_name__: Tuple = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__: Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__UpperCAmelCase , return_dict=__UpperCAmelCase )
# Instantiate optimizer
__magic_name__: List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__magic_name__: Optional[int] = optimizer_cls(params=model.parameters() , lr=__UpperCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
__magic_name__: Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__magic_name__: Optional[Any] = 1
__magic_name__: Any = (len(__UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__magic_name__: Dict = get_linear_schedule_with_warmup(
optimizer=__UpperCAmelCase , num_warmup_steps=0 , num_training_steps=__UpperCAmelCase , )
else:
__magic_name__: Tuple = DummyScheduler(__UpperCAmelCase , total_num_steps=__UpperCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__: Optional[Any] = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
__magic_name__: str = 0
# We also need to keep track of the stating epoch so files are named properly
__magic_name__: List[str] = 0
__magic_name__: Tuple = evaluate.load("""glue""" , """mrpc""" )
__magic_name__: Tuple = num_epochs
if args.partial_train_epoch is not None:
__magic_name__: int = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__magic_name__: Any = args.resume_from_checkpoint.split("""epoch_""" )[1]
__magic_name__: Optional[Any] = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__magic_name__: Union[str, Any] = int(__UpperCAmelCase ) + 1
__magic_name__: Union[str, Any] = evaluation_loop(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
accelerator.print("""resumed checkpoint performance:""" , __UpperCAmelCase )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__magic_name__: Any = json.load(__UpperCAmelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__magic_name__: Optional[int] = {}
for epoch in range(__UpperCAmelCase , __UpperCAmelCase ):
model.train()
for step, batch in enumerate(__UpperCAmelCase ):
__magic_name__: Union[str, Any] = model(**__UpperCAmelCase )
__magic_name__: int = outputs.loss
__magic_name__: Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__magic_name__: int = f'epoch_{epoch}'
__magic_name__: Tuple = os.path.join(args.output_dir , __UpperCAmelCase )
accelerator.save_state(__UpperCAmelCase )
__magic_name__: Optional[Any] = evaluation_loop(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__magic_name__: str = accuracy
__magic_name__: Tuple = lr_scheduler.get_lr()[0]
__magic_name__: List[Any] = optimizer.param_groups[0]["""lr"""]
__magic_name__: Any = epoch
__magic_name__: List[str] = overall_step
accelerator.print(f'epoch {epoch}:' , __UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def a ( ) -> Any:
__magic_name__: Tuple = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=__UpperCAmelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__UpperCAmelCase , )
parser.add_argument(
"""--output_dir""" , type=__UpperCAmelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=__UpperCAmelCase , default=2 , help="""Number of train epochs.""" , )
__magic_name__: Optional[int] = parser.parse_args()
__magic_name__: Optional[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
main()
| 96
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a_ ( ) -> Optional[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__lowercase ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def a_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def a_ ( ) -> Dict:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__lowercase ):
http_head('https://huggingface.co' )
| 686
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( _snake_case : int ):
_lowercase = [0] * len(_snake_case )
_lowercase = []
_lowercase = []
_lowercase = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_snake_case ) ):
if indegree[i] == 0:
queue.append(_snake_case )
while queue:
_lowercase = queue.pop(0 )
cnt += 1
topo.append(_snake_case )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_snake_case )
if cnt != len(_snake_case ):
print("Cycle exists" )
else:
print(_snake_case )
# Adjacency List of Graph
__UpperCamelCase : Tuple = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 227
|
"""simple docstring"""
def __UpperCAmelCase ( _snake_case : list, _snake_case : list, _snake_case : int ):
if len(_snake_case ) != len(_snake_case ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
_lowercase = [p / w for p, w in zip(_snake_case, _snake_case )]
# Creating a copy of the list and sorting profit/weight in ascending order
_lowercase = sorted(_snake_case )
# declaring useful variables
_lowercase = len(_snake_case )
_lowercase = 0
_lowercase = 0
_lowercase = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
_lowercase = sorted_profit_by_weight[length - i - 1]
_lowercase = profit_by_weight.index(_snake_case )
_lowercase = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
__UpperCamelCase : Any = [int(x) for x in input("Input profits separated by spaces: ").split()]
__UpperCamelCase : Optional[Any] = [int(x) for x in input("Input weights separated by spaces: ").split()]
__UpperCamelCase : Dict = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 227
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCamelCase = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__: List[str] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__magic_name__: List[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__magic_name__: Union[str, Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__magic_name__: Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6_0_0_0,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__magic_name__: int = tempfile.mkdtemp()
__magic_name__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__: Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
# load decoder from hub
__magic_name__: Dict = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCamelCase__ ( self : Any , **__snake_case : str ) -> Optional[int]:
__magic_name__: Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : str , **__snake_case : int ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : int , **__snake_case : List[str] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = self.get_tokenizer()
__magic_name__: Any = self.get_feature_extractor()
__magic_name__: Tuple = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
__magic_name__: Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__: int = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: List[Any] = self.get_decoder()
__magic_name__: int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Tuple = floats_list((3, 1_0_0_0) )
__magic_name__: List[str] = feature_extractor(__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = processor(__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = self.get_feature_extractor()
__magic_name__: List[str] = self.get_tokenizer()
__magic_name__: str = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = """This is a test string"""
__magic_name__: List[str] = processor(text=__snake_case )
__magic_name__: Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=(2, 1_0, 1_6) , __snake_case : List[Any]=7_7 ) -> Dict:
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Tuple = self.get_tokenizer()
__magic_name__: Any = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__magic_name__: str = processor.decode(__snake_case )
__magic_name__: Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCamelCase__ ( self : int , __snake_case : Dict ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: List[Any] = self.get_tokenizer()
__magic_name__: int = self.get_decoder()
__magic_name__: Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__: Optional[int] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
__magic_name__: Any = processor.batch_decode(__snake_case , __snake_case )
__magic_name__: Dict = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
__magic_name__: List[str] = decoder.decode_beams_batch(__snake_case , __snake_case )
__magic_name__, __magic_name__, __magic_name__: Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[str] = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: Optional[int] = self.get_decoder()
__magic_name__: Dict = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: str = self._get_dummy_logits()
__magic_name__: Dict = 1_5
__magic_name__: int = -20.0
__magic_name__: int = -4.0
__magic_name__: Dict = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Optional[int] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Any = [d[0][0] for d in decoded_decoder_out]
__magic_name__: Optional[int] = [d[0][2] for d in decoded_decoder_out]
__magic_name__: Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Union[str, Any] = self.get_decoder()
__magic_name__: str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Any = self._get_dummy_logits()
__magic_name__: Union[str, Any] = 2.0
__magic_name__: Optional[Any] = 5.0
__magic_name__: Optional[Any] = -20.0
__magic_name__: List[str] = True
__magic_name__: List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
__magic_name__: Union[str, Any] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , )
__magic_name__: List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case )
__magic_name__: List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: Optional[int] = os.listdir(__snake_case )
__magic_name__: Union[str, Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
__magic_name__: Any = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: str = os.listdir(__snake_case )
__magic_name__: Tuple = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = floats_list((3, 1_0_0_0) )
__magic_name__: Tuple = processor_wavaveca(__snake_case , return_tensors="""np""" )
__magic_name__: Optional[Any] = processor_auto(__snake_case , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__: int = self._get_dummy_logits()
__magic_name__: List[Any] = processor_wavaveca.batch_decode(__snake_case )
__magic_name__: Union[str, Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Dict = self.get_decoder()
__magic_name__: List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCamelCase__ ( __snake_case : Optional[int] , __snake_case : int ) -> int:
__magic_name__: Any = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
__magic_name__: Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Tuple = self._get_dummy_logits()[0]
__magic_name__: List[Any] = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Optional[int] = self._get_dummy_logits()
__magic_name__: Any = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
import torch
__magic_name__: List[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__snake_case )
__magic_name__: Dict = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__magic_name__: Any = iter(__snake_case )
__magic_name__: Optional[int] = next(__snake_case )
__magic_name__: Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__magic_name__: Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__magic_name__: List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__magic_name__: List[Any] = model(__snake_case ).logits.cpu().numpy()
__magic_name__: Optional[Any] = processor.decode(logits[0] , output_word_offsets=__snake_case )
__magic_name__: List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__magic_name__: str = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__magic_name__: Tuple = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , __snake_case )
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , output.text )
# output times
__magic_name__: Dict = torch.tensor(self.get_from_offsets(__snake_case , """start_time""" ) )
__magic_name__: Optional[Any] = torch.tensor(self.get_from_offsets(__snake_case , """end_time""" ) )
# fmt: off
__magic_name__: Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__magic_name__: int = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
| 96
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _a :
'''simple docstring'''
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase = namedtuple("""CoinsDistribResult""", """moves excess""")
def A( snake_case_ ):
"""simple docstring"""
if root is None:
return 0
# Validation
def count_nodes(snake_case_ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(snake_case_ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(snake_case_ ) != count_coins(snake_case_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(snake_case_ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowercase__ , lowercase__: Optional[int] = get_distrib(node.left )
lowercase__ , lowercase__: Optional[int] = get_distrib(node.right )
lowercase__: Tuple = 1 - left_distrib_excess
lowercase__: List[Any] = 1 - right_distrib_excess
lowercase__: Optional[Any] = (
left_distrib_moves
+ right_distrib_moves
+ abs(snake_case_ )
+ abs(snake_case_ )
)
lowercase__: int = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(snake_case_ , snake_case_ )
return get_distrib(snake_case_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class _a ( lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = """ernie_m"""
UpperCamelCase__ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , UpperCAmelCase_ = 250_002 , UpperCAmelCase_ = 768 , UpperCAmelCase_ = 12 , UpperCAmelCase_ = 12 , UpperCAmelCase_ = 3_072 , UpperCAmelCase_ = "gelu" , UpperCAmelCase_ = 0.1 , UpperCAmelCase_ = 0.1 , UpperCAmelCase_ = 514 , UpperCAmelCase_ = 0.02 , UpperCAmelCase_ = 1 , UpperCAmelCase_ = 1E-0_5 , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=0.0 , **UpperCAmelCase_ , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: List[Any] = num_hidden_layers
lowercase__: Tuple = num_attention_heads
lowercase__: Optional[int] = intermediate_size
lowercase__: List[Any] = hidden_act
lowercase__: Optional[Any] = hidden_dropout_prob
lowercase__: str = attention_probs_dropout_prob
lowercase__: Tuple = max_position_embeddings
lowercase__: str = initializer_range
lowercase__: List[Any] = layer_norm_eps
lowercase__: List[str] = classifier_dropout
lowercase__: Optional[Any] = is_decoder
lowercase__: Tuple = act_dropout
| 120
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = CycleDiffusionPipeline
_UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"latents"}
_UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
_UpperCamelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
_lowercase : int = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , num_train_timesteps=1_0_0_0 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
_lowercase : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_lowercase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowercase : List[str] = CLIPTextModel(_lowerCAmelCase )
_lowercase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowercase : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
_lowercase : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : str = image / 2 + 0.5
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : List[Any] = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : Any = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __a ( self ):
_lowercase : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Any = self.get_dummy_components()
_lowercase : Optional[Any] = CycleDiffusionPipeline(**_lowerCAmelCase )
_lowercase : Tuple = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : List[str] = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : List[Any] = pipe(**_lowerCAmelCase )
_lowercase : Dict = output.images
_lowercase : Tuple = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
_lowercase : Any = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __a ( self ):
_lowercase : Optional[int] = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowerCAmelCase , 'half' ):
_lowercase : Any = module.half()
_lowercase : str = CycleDiffusionPipeline(**_lowerCAmelCase )
_lowercase : str = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Any = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : str = pipe(**_lowerCAmelCase )
_lowercase : List[Any] = output.images
_lowercase : str = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
_lowercase : Optional[Any] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __a ( self ):
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def __a ( self ):
return super().test_inference_batch_single_identical()
@skip_mps
def __a ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __a ( self ):
return super().test_save_load_optional_components()
@skip_mps
def __a ( self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
_lowercase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
_lowercase : List[str] = init_image.resize((5_1_2, 5_1_2) )
_lowercase : Any = 'CompVis/stable-diffusion-v1-4'
_lowercase : Any = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder='scheduler' )
_lowercase : Tuple = CycleDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Optional[Any] = 'A black colored car'
_lowercase : Union[str, Any] = 'A blue colored car'
_lowercase : List[str] = torch.manual_seed(0 )
_lowercase : Union[str, Any] = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type='np' , )
_lowercase : List[str] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def __a ( self ):
_lowercase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
_lowercase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
_lowercase : List[str] = init_image.resize((5_1_2, 5_1_2) )
_lowercase : Optional[int] = 'CompVis/stable-diffusion-v1-4'
_lowercase : Optional[Any] = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder='scheduler' )
_lowercase : List[str] = CycleDiffusionPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Tuple = 'A black colored car'
_lowercase : Optional[Any] = 'A blue colored car'
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : Union[str, Any] = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type='np' , )
_lowercase : List[str] = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 66
|
from PIL import Image
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Image:
def brightness(SCREAMING_SNAKE_CASE ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
UpperCamelCase = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 66
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A =' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class _a ( unittest.TestCase ):
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
UpperCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(lowercase , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def A ( self : str , lowercase : int , lowercase : Optional[int] , lowercase : Tuple , lowercase : Any=None ):
'''simple docstring'''
UpperCAmelCase = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
UpperCAmelCase = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
UpperCAmelCase = black.format_str(lowercase , mode=lowercase )
UpperCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(lowercase , '''w''' , newline='''\n''' ) as f:
f.write(lowercase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase )
with open(lowercase , '''r''' ) as f:
self.assertTrue(f.read() , lowercase )
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(lowercase , lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , lowercase , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , lowercase ) , )
# Copy consistency with a really long name
UpperCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , lowercase , lowercase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , lowercase , overwrite_result=re.sub('''Bert''' , '''TestModel''' , lowercase ) , )
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
UpperCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
UpperCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
UpperCAmelCase , UpperCAmelCase = check_copies.convert_to_localized_md(
lowercase , lowercase , localized_readme['''format_model_list'''] )
self.assertFalse(lowercase )
self.assertEqual(lowercase , lowercase )
UpperCAmelCase , UpperCAmelCase = check_copies.convert_to_localized_md(
lowercase , lowercase , localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowercase )
UpperCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
UpperCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCAmelCase , UpperCAmelCase = check_copies.convert_to_localized_md(
lowercase , lowercase , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(lowercase , lowercase )
| 358
|
'''simple docstring'''
from typing import Any
class _a :
def __init__( self : int , lowercase : Any ):
'''simple docstring'''
UpperCAmelCase = data
UpperCAmelCase = None
class _a :
def __init__( self : int ):
'''simple docstring'''
UpperCAmelCase = None
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
UpperCAmelCase = temp.next
print()
def A ( self : List[str] , lowercase : Any ):
'''simple docstring'''
UpperCAmelCase = Node(lowercase )
UpperCAmelCase = self.head
UpperCAmelCase = new_node
def A ( self : List[Any] , lowercase : List[Any] , lowercase : str ):
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
UpperCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase = node_a.next
UpperCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase = node_a.next
if node_a is None or node_a is None:
return
UpperCAmelCase , UpperCAmelCase = node_a.data, node_a.data
if __name__ == "__main__":
A =LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 358
| 1
|
from manim import *
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def _a ( self ):
lowerCAmelCase_: List[str] = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_: List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCAmelCase_: Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 )
lowerCAmelCase_: int = [mem.copy() for i in range(6 )]
lowerCAmelCase_: Optional[int] = [mem.copy() for i in range(6 )]
lowerCAmelCase_: Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
lowerCAmelCase_: List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
lowerCAmelCase_: Optional[int] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
lowerCAmelCase_: Dict = Text("CPU" , font_size=24 )
lowerCAmelCase_: List[str] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
lowerCAmelCase_: Optional[Any] = [mem.copy() for i in range(4 )]
lowerCAmelCase_: Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
lowerCAmelCase_: List[str] = Text("GPU" , font_size=24 )
lowerCAmelCase_: str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
lowerCAmelCase_: Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_: Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
lowerCAmelCase_: List[Any] = Text("Model" , font_size=24 )
lowerCAmelCase_: List[str] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
lowerCAmelCase_: Any = []
lowerCAmelCase_: List[str] = []
for i, rect in enumerate(lowerCamelCase__ ):
lowerCAmelCase_: str = fill.copy().set_fill(lowerCamelCase__ , opacity=0.8 )
target.move_to(lowerCamelCase__ )
model_arr.append(lowerCamelCase__ )
lowerCAmelCase_: Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__ , *lowerCamelCase__ )
lowerCAmelCase_: str = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_: List[str] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_: Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
lowerCAmelCase_: Union[str, Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
lowerCAmelCase_: List[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
lowerCAmelCase_: int = Text("Disk" , font_size=24 )
lowerCAmelCase_: Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
disk.move_to([-4, -1.2_5, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: Dict = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_: List[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase_: List[str] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase__ )
lowerCAmelCase_: Dict = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) )
lowerCAmelCase_: List[Any] = Square(0.3 )
input.set_fill(lowerCamelCase__ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase__ , buff=0.5 )
self.play(Write(lowerCamelCase__ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase__ , buff=0.0_2 )
self.play(MoveToTarget(lowerCamelCase__ ) )
self.play(FadeOut(lowerCamelCase__ ) )
lowerCAmelCase_: List[Any] = Arrow(start=lowerCamelCase__ , end=lowerCamelCase__ , color=lowerCamelCase__ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase__ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase_: Optional[int] = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) )
lowerCAmelCase_: str = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(lowerCamelCase__ ) , Circumscribe(model_arr[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase_: int = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , lowerCamelCase__ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
lowerCAmelCase_: List[Any] = AnimationGroup(
FadeOut(lowerCamelCase__ , run_time=0.5 ) , MoveToTarget(lowerCamelCase__ , run_time=0.5 ) , FadeIn(lowerCamelCase__ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase_: Any = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase__ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase__ , **lowerCamelCase__ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase_: Tuple = a_c
lowerCAmelCase_: Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase__ ) , FadeOut(lowerCamelCase__ , run_time=0.5 ) , )
lowerCAmelCase_: Tuple = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) , MoveToTarget(lowerCamelCase__ ) )
self.wait()
| 613
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : List[str] = False
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
lowerCAmelCase_: List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCAmelCase_: Tuple = "A painting of a squirrel eating a burger "
lowerCAmelCase_: List[str] = torch.manual_seed(0 )
lowerCAmelCase_: Dict = pipe(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
lowerCAmelCase_: int = VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCAmelCase_: List[str] = generator.manual_seed(0 )
lowerCAmelCase_: Dict = pipe(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _a ( self ):
lowerCAmelCase_: Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCAmelCase_: str = "A painting of a squirrel eating a burger "
lowerCAmelCase_: int = torch.manual_seed(0 )
lowerCAmelCase_: List[str] = pipe(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowerCAmelCase_: List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_: Optional[int] = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 613
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_A : int =logging.get_logger(__name__)
_A : Tuple ={
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _lowercase ( _lowercase ):
a = """dpt"""
def __init__( self: Union[str, Any] , UpperCamelCase__: List[str]=768 , UpperCamelCase__: List[str]=12 , UpperCamelCase__: Optional[Any]=12 , UpperCamelCase__: int=3_072 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Dict=0.0 , UpperCamelCase__: List[str]=0.02 , UpperCamelCase__: int=1e-12 , UpperCamelCase__: Dict=384 , UpperCamelCase__: int=16 , UpperCamelCase__: str=3 , UpperCamelCase__: Any=False , UpperCamelCase__: Tuple=True , UpperCamelCase__: Optional[Any]=[2, 5, 8, 11] , UpperCamelCase__: Optional[Any]="project" , UpperCamelCase__: Tuple=[4, 2, 1, 0.5] , UpperCamelCase__: Union[str, Any]=[96, 192, 384, 768] , UpperCamelCase__: Optional[Any]=256 , UpperCamelCase__: List[Any]=-1 , UpperCamelCase__: Optional[Any]=False , UpperCamelCase__: List[str]=True , UpperCamelCase__: List[Any]=0.4 , UpperCamelCase__: Union[str, Any]=255 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Any=[1, 1_024, 24, 24] , UpperCamelCase__: Optional[int]=[0, 1] , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ):
super().__init__(**UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : str = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
lowerCamelCase__ : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
lowerCamelCase__ : Union[str, Any] = BitConfig(**UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
lowerCamelCase__ : List[str] = BitConfig(**UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase__ : Dict = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
lowerCamelCase__ : List[str] = backbone_featmap_shape
lowerCamelCase__ : List[str] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : str = None
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Tuple = num_attention_heads
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = layer_norm_eps
lowerCamelCase__ : str = image_size
lowerCamelCase__ : Optional[Any] = patch_size
lowerCamelCase__ : List[str] = num_channels
lowerCamelCase__ : str = qkv_bias
lowerCamelCase__ : List[str] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
lowerCamelCase__ : Union[str, Any] = readout_type
lowerCamelCase__ : Optional[Any] = reassemble_factors
lowerCamelCase__ : Optional[int] = neck_hidden_sizes
lowerCamelCase__ : int = fusion_hidden_size
lowerCamelCase__ : Dict = head_in_index
lowerCamelCase__ : Dict = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCamelCase__ : Union[str, Any] = use_auxiliary_head
lowerCamelCase__ : Tuple = auxiliary_loss_weight
lowerCamelCase__ : List[str] = semantic_loss_ignore_index
lowerCamelCase__ : Optional[int] = semantic_classifier_dropout
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCamelCase__ : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase__ : Any = self.__class__.model_type
return output
| 631
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631
| 1
|
__snake_case : int = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 540
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__snake_case : Optional[int] = logging.getLogger(__name__)
class A__(a_ ):
"""simple docstring"""
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase=None , _lowercase=None ) -> Tuple:
a_ : str = self.layer[current_layer](_lowercase , _lowercase , head_mask[current_layer] )
a_ : str = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''', a_, )
class A__(a_ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[str]:
super().__init__(_lowercase )
a_ : Tuple = BertEncoderWithPabee(_lowercase )
self.init_weights()
a_ : int = 0
a_ : Any = 0
a_ : Tuple = 0
a_ : Optional[int] = 0
def UpperCamelCase__ ( self , _lowercase ) -> Tuple:
a_ : Dict = threshold
def UpperCamelCase__ ( self , _lowercase ) -> List[Any]:
a_ : Optional[int] = patience
def UpperCamelCase__ ( self ) -> Dict:
a_ : str = 0
a_ : Optional[int] = 0
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
a_ : Optional[int] = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , ) -> str:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
a_ : Dict = input_ids.size()
elif inputs_embeds is not None:
a_ : Dict = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
a_ : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
a_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
a_ : List[str] = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
a_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
a_ , a_ , a_ : int = encoder_hidden_states.size()
a_ : List[str] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
a_ : Tuple = torch.ones(_lowercase , device=_lowercase )
a_ : List[Any] = self.invert_attention_mask(_lowercase )
else:
a_ : Optional[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
a_ : List[Any] = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
a_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
a_ : List[Any] = embedding_output
if self.training:
a_ : Any = []
for i in range(self.config.num_hidden_layers ):
a_ : int = self.encoder.adaptive_forward(
_lowercase , current_layer=_lowercase , attention_mask=_lowercase , head_mask=_lowercase )
a_ : List[Any] = self.pooler(_lowercase )
a_ : Optional[int] = output_layers[i](output_dropout(_lowercase ) )
res.append(_lowercase )
elif self.patience == 0: # Use all layers for inference
a_ : Union[str, Any] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
a_ : Union[str, Any] = self.pooler(encoder_outputs[0] )
a_ : List[str] = [output_layers[self.config.num_hidden_layers - 1](_lowercase )]
else:
a_ : Any = 0
a_ : Dict = None
a_ : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
a_ : Optional[Any] = self.encoder.adaptive_forward(
_lowercase , current_layer=_lowercase , attention_mask=_lowercase , head_mask=_lowercase )
a_ : Optional[int] = self.pooler(_lowercase )
a_ : int = output_layers[i](_lowercase )
if regression:
a_ : Dict = logits.detach()
if patient_result is not None:
a_ : Optional[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
a_ : Optional[Any] = 0
else:
a_ : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
a_ : str = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_lowercase ) ):
patient_counter += 1
else:
a_ : Tuple = 0
a_ : Union[str, Any] = logits
if patient_counter == self.patience:
break
a_ : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''', a_, )
class A__(a_ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> str:
super().__init__(_lowercase )
a_ : str = config.num_labels
a_ : Optional[Any] = BertModelWithPabee(_lowercase )
a_ : int = nn.Dropout(config.hidden_dropout_prob )
a_ : str = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Tuple:
a_ : Optional[Any] = self.bert(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
a_ : Optional[Any] = (logits[-1],)
if labels is not None:
a_ : int = None
a_ : Union[str, Any] = 0
for ix, logits_item in enumerate(_lowercase ):
if self.num_labels == 1:
# We are doing regression
a_ : Any = MSELoss()
a_ : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
a_ : Any = CrossEntropyLoss()
a_ : int = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
a_ : str = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
a_ : Any = (total_loss / total_weights,) + outputs
return outputs
| 540
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCamelCase = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 363
|
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __lowercase ( self :Optional[int] , __lowercase :Union[str, Any] , __lowercase :List[Any] ):
pass
def __lowercase ( self :Any ):
pass
def __lowercase ( self :str ):
pass
def __lowercase ( self :str , __lowercase :Dict , __lowercase :List[Any] , __lowercase :Union[str, Any] , __lowercase :Union[str, Any] , __lowercase :Any=None , **__lowercase :Tuple ):
__lowerCamelCase : Optional[Any] =VisionTextDualEncoderConfig.from_vision_text_configs(__lowercase , __lowercase )
__lowerCamelCase : Dict =TFVisionTextDualEncoderModel(__lowercase )
__lowerCamelCase : List[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def __lowercase ( self :str , __lowercase :Any , __lowercase :Dict , __lowercase :Any , __lowercase :int , __lowercase :str=None , **__lowercase :List[str] ):
__lowerCamelCase , __lowerCamelCase : Any =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : List[str] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
__lowerCamelCase : Optional[int] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowercase ( self :int , __lowercase :Dict , __lowercase :List[Any] , __lowercase :Any , __lowercase :Optional[Any] , __lowercase :Tuple=None , **__lowercase :List[Any] ):
__lowerCamelCase , __lowerCamelCase : int =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : Dict ={'''vision_model''': vision_model, '''text_model''': text_model}
__lowerCamelCase : List[Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowercase )
__lowerCamelCase : Optional[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowercase ( self :Optional[Any] , __lowercase :Dict , __lowercase :int , __lowercase :Dict , __lowercase :List[Any] , __lowercase :Tuple=None , **__lowercase :Optional[Any] ):
__lowerCamelCase , __lowerCamelCase : Tuple =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : List[Any] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
__lowerCamelCase : Any =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
__lowerCamelCase : Optional[Any] =output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__lowerCamelCase : Dict =TFVisionTextDualEncoderModel.from_pretrained(__lowercase )
__lowerCamelCase : Optional[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
__lowerCamelCase : Any =after_output[0].numpy()
__lowerCamelCase : Tuple =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase , 1e-5 )
def __lowercase ( self :int , __lowercase :Any , __lowercase :List[Any] , __lowercase :Any , __lowercase :str , __lowercase :int=None , **__lowercase :List[Any] ):
__lowerCamelCase , __lowerCamelCase : Any =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : Union[str, Any] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
__lowerCamelCase : Any =model(
input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase )
__lowerCamelCase : Optional[Any] =output.vision_model_output.attentions
self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : Dict =to_atuple(vision_model.config.image_size )
__lowerCamelCase : Tuple =to_atuple(vision_model.config.patch_size )
__lowerCamelCase : Tuple =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCamelCase : Any =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCamelCase : List[Any] =output.text_model_output.attentions
self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowercase ( self :List[Any] , __lowercase :np.ndarray , __lowercase :np.ndarray , __lowercase :float ):
__lowerCamelCase : Dict =np.abs((a - b) ).max()
self.assertLessEqual(__lowercase , __lowercase , f'Difference between torch and flax is {diff} (>= {tol}).' )
def __lowercase ( self :List[str] ):
__lowerCamelCase : Any =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__lowercase )
def __lowercase ( self :Tuple ):
__lowerCamelCase : List[Any] =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__lowercase )
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : Dict =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__lowercase )
def __lowercase ( self :Any ):
__lowerCamelCase : List[Any] =self.prepare_config_and_inputs()
self.check_save_load(**__lowercase )
def __lowercase ( self :List[str] ):
__lowerCamelCase : Any =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__lowercase )
@slow
def __lowercase ( self :Tuple ):
__lowerCamelCase , __lowerCamelCase : Optional[Any] =self.get_pretrained_model_and_inputs()
__lowerCamelCase : Optional[Any] =model_a(**__lowercase )
__lowerCamelCase : int =outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__lowercase )
__lowerCamelCase : List[Any] =TFVisionTextDualEncoderModel.from_pretrained(__lowercase )
__lowerCamelCase : Tuple =model_a(**__lowercase )
__lowerCamelCase : Optional[int] =after_outputs[0].numpy()
__lowerCamelCase : Optional[Any] =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase , 1e-5 )
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :List[Any] ):
__lowerCamelCase : Union[str, Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
__lowerCamelCase : Any =13
__lowerCamelCase : Optional[int] =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCamelCase : Dict =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCamelCase : Optional[Any] =random_attention_mask([batch_size, 4] )
__lowerCamelCase : Dict ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowercase ( self :Optional[Any] , __lowercase :List[Any] , __lowercase :List[str] ):
__lowerCamelCase : Optional[Any] =TFViTModel(__lowercase , name='''vision_model''' )
__lowerCamelCase : Union[str, Any] =TFBertModel(__lowercase , name='''text_model''' )
return vision_model, text_model
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : str =TFViTModelTester(self )
__lowerCamelCase : Any =TFBertModelTester(self )
__lowerCamelCase : Optional[Any] =vit_model_tester.prepare_config_and_inputs()
__lowerCamelCase : Tuple =bert_model_tester.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict =vision_config_and_inputs
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : int =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Optional[Any] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__lowerCamelCase : List[Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
__lowerCamelCase : Tuple =13
__lowerCamelCase : str =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCamelCase : Any =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCamelCase : Optional[int] =random_attention_mask([batch_size, 4] )
__lowerCamelCase : Optional[Any] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowercase ( self :Any , __lowercase :Tuple , __lowercase :List[Any] , __lowercase :Union[str, Any] , __lowercase :Any , __lowercase :int=None , **__lowercase :List[str] ):
__lowerCamelCase , __lowerCamelCase : Union[str, Any] =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : Optional[int] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
__lowerCamelCase : Any =model(
input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase )
__lowerCamelCase : str =output.vision_model_output.attentions
self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowerCamelCase : int =to_atuple(vision_model.config.image_size )
__lowerCamelCase : Union[str, Any] =to_atuple(vision_model.config.patch_size )
__lowerCamelCase : Tuple =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCamelCase : Any =num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCamelCase : Any =output.text_model_output.attentions
self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowercase ( self :Any , __lowercase :Any , __lowercase :Optional[Any] ):
__lowerCamelCase : str =TFDeiTModel(__lowercase , name='''vision_model''' )
__lowerCamelCase : List[str] =TFRobertaModel(__lowercase , name='''text_model''' )
return vision_model, text_model
def __lowercase ( self :Dict ):
__lowerCamelCase : Optional[int] =TFDeiTModelTester(self )
__lowerCamelCase : Any =TFRobertaModelTester(self )
__lowerCamelCase : Dict =vit_model_tester.prepare_config_and_inputs()
__lowerCamelCase : Optional[int] =bert_model_tester.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] =vision_config_and_inputs
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : List[Any] =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Tuple ):
__lowerCamelCase : str =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
__lowerCamelCase : str =13
__lowerCamelCase : int =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCamelCase : int =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCamelCase : str =random_attention_mask([batch_size, 4] )
__lowerCamelCase : Optional[Any] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowercase ( self :List[Any] , __lowercase :str , __lowercase :Tuple ):
__lowerCamelCase : int =TFCLIPVisionModel(__lowercase , name='''vision_model''' )
__lowerCamelCase : Optional[Any] =TFBertModel(__lowercase , name='''text_model''' )
return vision_model, text_model
def __lowercase ( self :Tuple ):
__lowerCamelCase : List[str] =TFCLIPVisionModelTester(self )
__lowerCamelCase : Union[str, Any] =TFBertModelTester(self )
__lowerCamelCase : str =clip_model_tester.prepare_config_and_inputs()
__lowerCamelCase : Tuple =bert_model_tester.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase : Any =vision_config_and_inputs
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : str =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self :List[Any] ):
__lowerCamelCase : Union[str, Any] =TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=__lowercase )
__lowerCamelCase : str =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__lowerCamelCase : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__lowerCamelCase : int =processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__lowercase , padding=__lowercase , return_tensors='''np''' )
__lowerCamelCase : str =model(**__lowercase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCamelCase : int =np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __lowercase , atol=1e-3 ) )
| 363
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int , _lowerCAmelCase : WhisperForConditionalGeneration , _lowerCAmelCase : WhisperProcessor , _lowerCAmelCase : AutoencoderKL , _lowerCAmelCase : CLIPTextModel , _lowerCAmelCase : CLIPTokenizer , _lowerCAmelCase : UNetaDConditionModel , _lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowerCAmelCase : StableDiffusionSafetyChecker , _lowerCAmelCase : CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=_lowerCAmelCase , speech_processor=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
SCREAMING_SNAKE_CASE_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
self.enable_attention_slicing(_lowerCAmelCase )
@torch.no_grad()
def __call__( self : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=16_000 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 50 , _lowerCAmelCase : float = 7.5 , _lowerCAmelCase : Optional[Union[str, List[str]]] = None , _lowerCAmelCase : Optional[int] = 1 , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : Optional[torch.Generator] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = self.speech_processor.feature_extractor(
_lowerCAmelCase , return_tensors='pt' , sampling_rate=_lowerCAmelCase ).input_features.to(self.device )
SCREAMING_SNAKE_CASE_ = self.speech_model.generate(_lowerCAmelCase , max_length=480_000 )
SCREAMING_SNAKE_CASE_ = self.speech_processor.tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , normalize=_lowerCAmelCase )[
0
]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = 1
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_lowerCAmelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_lowerCAmelCase )}." )
# get prompt text embeddings
SCREAMING_SNAKE_CASE_ = self.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text_embeddings.shape
SCREAMING_SNAKE_CASE_ = text_embeddings.repeat(1 , _lowerCAmelCase , 1 )
SCREAMING_SNAKE_CASE_ = text_embeddings.view(bs_embed * num_images_per_prompt , _lowerCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ = 42
if negative_prompt is None:
SCREAMING_SNAKE_CASE_ = [''] * batch_size
elif type(_lowerCAmelCase ) is not type(_lowerCAmelCase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(_lowerCAmelCase )} !="
F" {type(_lowerCAmelCase )}." )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [negative_prompt]
elif batch_size != len(_lowerCAmelCase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(_lowerCAmelCase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
SCREAMING_SNAKE_CASE_ = negative_prompt
SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE_ = self.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE_ = uncond_embeddings.repeat(1 , _lowerCAmelCase , 1 )
SCREAMING_SNAKE_CASE_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowerCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE_ = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device='cpu' , dtype=_lowerCAmelCase ).to(
self.device )
else:
SCREAMING_SNAKE_CASE_ = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
SCREAMING_SNAKE_CASE_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE_ = {}
if accepts_eta:
SCREAMING_SNAKE_CASE_ = eta
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 1 / 0.1_8215 * latents
SCREAMING_SNAKE_CASE_ = self.vae.decode(_lowerCAmelCase ).sample
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_lowerCAmelCase , nsfw_content_detected=_lowerCAmelCase )
| 31
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=4_00 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , ) -> int:
A : Optional[Any] = size if size is not None else {'''height''': 18, '''width''': 18}
A : Dict = parent
A : Any = batch_size
A : Dict = num_channels
A : Optional[Any] = image_size
A : Dict = min_resolution
A : Optional[int] = max_resolution
A : Any = do_resize
A : List[Any] = size
A : List[Any] = apply_ocr
def snake_case ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ) -> int:
A : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ) -> Optional[Any]:
A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''apply_ocr''' ) )
def snake_case ( self ) -> Union[str, Any]:
A : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
A : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def snake_case ( self ) -> List[str]:
pass
def snake_case ( self ) -> List[str]:
# Initialize image_processing
A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
A : str = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , __UpperCAmelCase )
self.assertIsInstance(encoding.boxes , __UpperCAmelCase )
# Test batched
A : int = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def snake_case ( self ) -> Union[str, Any]:
# Initialize image_processing
A : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
A : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
A : int = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def snake_case ( self ) -> Optional[int]:
# Initialize image_processing
A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
A : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
A : Dict = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def snake_case ( self ) -> Any:
# with apply_OCR = True
A : Dict = LayoutLMvaImageProcessor()
from datasets import load_dataset
A : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
A : Any = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
A : List[Any] = image_processing(__UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A : Tuple = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
A : int = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __UpperCAmelCase )
self.assertListEqual(encoding.boxes , __UpperCAmelCase )
# with apply_OCR = False
A : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase )
A : List[str] = image_processing(__UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 542
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def A_ ( _lowercase, _lowercase=False ):
'''simple docstring'''
snake_case_ :Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def A_ ( _lowercase, _lowercase, _lowercase=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ :List[str] = """"""
else:
snake_case_ :Any = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ :List[str] = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
snake_case_ :Union[str, Any] = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ :Dict = in_proj_weight[
: config.hidden_size, :
]
snake_case_ :Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case_ :Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ :List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ :int = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ :Any = in_proj_bias[-config.hidden_size :]
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowercase, _lowercase )
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Any = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(_lowercase, _lowercase )
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = dct.pop(_lowercase )
snake_case_ :Union[str, Any] = val
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Tuple = ViTMSNConfig()
snake_case_ :str = 1000
snake_case_ :List[Any] = """datasets/huggingface/label-files"""
snake_case_ :Tuple = """imagenet-1k-id2label.json"""
snake_case_ :Any = json.load(open(hf_hub_download(_lowercase, _lowercase ), """r""" ) )
snake_case_ :Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
snake_case_ :Any = idalabel
snake_case_ :Dict = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
snake_case_ :int = 384
snake_case_ :Dict = 1536
snake_case_ :Any = 6
elif "l16" in checkpoint_url:
snake_case_ :str = 1024
snake_case_ :Optional[Any] = 4096
snake_case_ :List[str] = 24
snake_case_ :Tuple = 16
snake_case_ :List[Any] = 0.1
elif "b4" in checkpoint_url:
snake_case_ :str = 4
elif "l7" in checkpoint_url:
snake_case_ :Any = 7
snake_case_ :Union[str, Any] = 1024
snake_case_ :Union[str, Any] = 4096
snake_case_ :int = 24
snake_case_ :Any = 16
snake_case_ :List[str] = 0.1
snake_case_ :List[Any] = ViTMSNModel(_lowercase )
snake_case_ :List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location="""cpu""" )["""target_encoder"""]
snake_case_ :Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowercase )
snake_case_ :Optional[Any] = create_rename_keys(_lowercase, base_model=_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase, _lowercase, _lowercase )
read_in_q_k_v(_lowercase, _lowercase, base_model=_lowercase )
model.load_state_dict(_lowercase )
model.eval()
snake_case_ :Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ :Dict = Image.open(requests.get(_lowercase, stream=_lowercase ).raw )
snake_case_ :Dict = ViTImageProcessor(
size=config.image_size, image_mean=_lowercase, image_std=_lowercase )
snake_case_ :str = image_processor(images=_lowercase, return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
snake_case_ :int = model(**_lowercase )
snake_case_ :List[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
snake_case_ :Optional[int] = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
snake_case_ :List[str] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
snake_case_ :Optional[int] = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
snake_case_ :Tuple = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
snake_case_ :List[Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _lowercase, atol=1e-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 310
|
"""simple docstring"""
import os
def A_ ( ):
'''simple docstring'''
with open(os.path.dirname(_lowercase ) + """/grid.txt""" ) as f:
snake_case_ :Optional[int] = [] # noqa: E741
for _ in range(20 ):
l.append([int(_lowercase ) for x in f.readline().split()] )
snake_case_ :str = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case_ :Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case_ :str = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case_ :Tuple = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case_ :Union[str, Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case_ :Optional[int] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case_ :Union[str, Any] = temp
# diagonal 2
for i in range(17 ):
for j in range(3, 20 ):
snake_case_ :int = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case_ :Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 310
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
def a__ ( self ) -> int:
_lowerCamelCase : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowercase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_lowercase , '''neck_hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_lowercase , '''num_attention_heads''' ) )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=32 , _lowercase=2 , _lowercase=3 , _lowercase=640 , _lowercase=4 , _lowercase="silu" , _lowercase=3 , _lowercase=32 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=10 , _lowercase=None , ) -> Dict:
_lowerCamelCase : Tuple = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : Union[str, Any] = patch_size
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Optional[int] = last_hidden_size
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Dict = conv_kernel_size
_lowerCamelCase : str = output_stride
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Optional[int] = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = classifier_dropout_prob
_lowerCamelCase : str = use_labels
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = num_labels
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Tuple = scope
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Dict = None
_lowerCamelCase : Optional[int] = None
if self.use_labels:
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def a__ ( self ) -> Dict:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
_lowerCamelCase : int = MobileViTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCamelCase : Dict = model(_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : Tuple = MobileViTForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
_lowerCamelCase : Any = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : Any = MobileViTForSemanticSegmentation(_lowercase )
model.to(_lowercase )
model.eval()
_lowerCamelCase : List[Any] = model(_lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowerCamelCase : str = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a__ ( self ) -> List[str]:
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
__snake_case = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__snake_case = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def a__ ( self ) -> List[Any]:
_lowerCamelCase : Optional[int] = MobileViTModelTester(self )
_lowerCamelCase : Dict = MobileViTConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def a__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def a__ ( self ) -> Dict:
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def a__ ( self ) -> Tuple:
pass
def a__ ( self ) -> List[Any]:
_lowerCamelCase, _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(_lowercase )
_lowerCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__ ( self ) -> Union[str, Any]:
pass
def a__ ( self ) -> int:
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def a__ ( self ) -> Union[str, Any]:
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
_lowerCamelCase : str = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_lowerCamelCase : List[str] = outputs.hidden_states
_lowerCamelCase : Any = 5
self.assertEqual(len(_lowercase ) , _lowercase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowerCamelCase : Union[str, Any] = 2
for i in range(len(_lowercase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
@slow
def a__ ( self ) -> Any:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = MobileViTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def UpperCamelCase ( ) ->int:
_lowerCamelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a__ ( self ) -> int:
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_lowerCamelCase : Union[str, Any] = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(_lowercase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : List[Any] = image_processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCamelCase : str = model(**_lowercase )
# verify the logits
_lowerCamelCase : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
_lowerCamelCase : List[Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
@slow
def a__ ( self ) -> Union[str, Any]:
_lowerCamelCase : Dict = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_lowerCamelCase : Tuple = model.to(_lowercase )
_lowerCamelCase : Union[str, Any] = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Optional[int] = image_processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**_lowercase )
_lowerCamelCase : Union[str, Any] = outputs.logits
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _lowercase )
_lowerCamelCase : Dict = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def a__ ( self ) -> int:
_lowerCamelCase : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_lowerCamelCase : str = model.to(_lowercase )
_lowerCamelCase : Any = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : Dict = image_processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCamelCase : int = model(**_lowercase )
_lowerCamelCase : Union[str, Any] = outputs.logits.detach().cpu()
_lowerCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=_lowercase , target_sizes=[(50, 60)] )
_lowerCamelCase : List[str] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _lowercase )
_lowerCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=_lowercase )
_lowerCamelCase : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _lowercase )
| 434
|
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->tuple[complex, complex]:
if a == 0:
raise ValueError('''Coefficient \'a\' must not be zero.''' )
_lowerCamelCase : List[str] = b * b - 4 * a * c
_lowerCamelCase : int = (-b + sqrt(SCREAMING_SNAKE_CASE_ )) / (2 * a)
_lowerCamelCase : Tuple = (-b - sqrt(SCREAMING_SNAKE_CASE_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def UpperCamelCase ( ) ->Optional[int]:
_lowerCamelCase, _lowerCamelCase : List[str] = quadratic_roots(a=5 , b=6 , c=1 )
print(F'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 434
| 1
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class a :
def __init__( self : Dict ) -> None:
lowerCamelCase_ = [2, 1, 2, -1]
lowerCamelCase_ = [1, 2, 3, 4]
def UpperCamelCase ( self : List[Any] ) -> list[float]:
lowerCamelCase_ = len(self.first_signal )
lowerCamelCase_ = len(self.second_signal )
lowerCamelCase_ = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# create a zero matrix of max_length x max_length
lowerCamelCase_ = [[0] * max_length for i in range(__SCREAMING_SNAKE_CASE )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = deque(self.second_signal )
rotated_signal.rotate(__SCREAMING_SNAKE_CASE )
for j, item in enumerate(__SCREAMING_SNAKE_CASE ):
matrix[i][j] += item
# multiply the matrix with the first signal
lowerCamelCase_ = np.matmul(np.transpose(__SCREAMING_SNAKE_CASE ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__SCREAMING_SNAKE_CASE , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 710
|
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
while a != 0:
lowerCamelCase_ , lowerCamelCase_ = b % a, a
return b
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
if gcd(_lowerCamelCase , _lowerCamelCase ) != 1:
lowerCamelCase_ = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1, 0, a
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0, 1, m
while va != 0:
lowerCamelCase_ = ua // va
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 137
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# General docstring
_SCREAMING_SNAKE_CASE : List[Any] = "RegNetConfig"
# Base docstring
_SCREAMING_SNAKE_CASE : Any = "facebook/regnet-y-040"
_SCREAMING_SNAKE_CASE : str = [1, 10_88, 7, 7]
# Image classification docstring
_SCREAMING_SNAKE_CASE : Tuple = "facebook/regnet-y-040"
_SCREAMING_SNAKE_CASE : Optional[Any] = "tabby, tabby cat"
_SCREAMING_SNAKE_CASE : Tuple = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case = 3 , __snake_case = 1 , __snake_case = 1 , __snake_case = "relu" , ):
super().__init__()
snake_case = nn.Convad(
__snake_case , __snake_case , kernel_size=__snake_case , stride=__snake_case , padding=kernel_size // 2 , groups=__snake_case , bias=__snake_case , )
snake_case = nn.BatchNormad(__snake_case )
snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def a_ ( self , __snake_case ):
snake_case = self.convolution(__snake_case )
snake_case = self.normalization(__snake_case )
snake_case = self.activation(__snake_case )
return hidden_state
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__()
snake_case = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
snake_case = config.num_channels
def a_ ( self , __snake_case ):
snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
snake_case = self.embedder(__snake_case )
return hidden_state
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case = 2 ):
super().__init__()
snake_case = nn.Convad(__snake_case , __snake_case , kernel_size=1 , stride=__snake_case , bias=__snake_case )
snake_case = nn.BatchNormad(__snake_case )
def a_ ( self , __snake_case ):
snake_case = self.convolution(__snake_case )
snake_case = self.normalization(__snake_case )
return hidden_state
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case ):
super().__init__()
snake_case = nn.AdaptiveAvgPoolad((1, 1) )
snake_case = nn.Sequential(
nn.Convad(__snake_case , __snake_case , kernel_size=1 ) , nn.ReLU() , nn.Convad(__snake_case , __snake_case , kernel_size=1 ) , nn.Sigmoid() , )
def a_ ( self , __snake_case ):
# b c h w -> b c 1 1
snake_case = self.pooler(__snake_case )
snake_case = self.attention(__snake_case )
snake_case = hidden_state * attention
return hidden_state
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 1 ):
super().__init__()
snake_case = in_channels != out_channels or stride != 1
snake_case = max(1 , out_channels // config.groups_width )
snake_case = (
RegNetShortCut(__snake_case , __snake_case , stride=__snake_case ) if should_apply_shortcut else nn.Identity()
)
snake_case = nn.Sequential(
RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=__snake_case ) , )
snake_case = ACTaFN[config.hidden_act]
def a_ ( self , __snake_case ):
snake_case = hidden_state
snake_case = self.layer(__snake_case )
snake_case = self.shortcut(__snake_case )
hidden_state += residual
snake_case = self.activation(__snake_case )
return hidden_state
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 1 ):
super().__init__()
snake_case = in_channels != out_channels or stride != 1
snake_case = max(1 , out_channels // config.groups_width )
snake_case = (
RegNetShortCut(__snake_case , __snake_case , stride=__snake_case ) if should_apply_shortcut else nn.Identity()
)
snake_case = nn.Sequential(
RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act ) , RegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=__snake_case ) , )
snake_case = ACTaFN[config.hidden_act]
def a_ ( self , __snake_case ):
snake_case = hidden_state
snake_case = self.layer(__snake_case )
snake_case = self.shortcut(__snake_case )
hidden_state += residual
snake_case = self.activation(__snake_case )
return hidden_state
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 2 , __snake_case = 2 , ):
super().__init__()
snake_case = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__snake_case , __snake_case , __snake_case , stride=__snake_case , ) , *[layer(__snake_case , __snake_case , __snake_case ) for _ in range(depth - 1 )] , )
def a_ ( self , __snake_case ):
snake_case = self.layers(__snake_case )
return hidden_state
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__()
snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__snake_case , config.depths[1:] ):
self.stages.append(RegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case ) )
def a_ ( self , __snake_case , __snake_case = False , __snake_case = True ):
snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
snake_case = hidden_states + (hidden_state,)
snake_case = stage_module(__snake_case )
if output_hidden_states:
snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = RegNetConfig
__magic_name__ = 'regnet'
__magic_name__ = 'pixel_values'
__magic_name__ = True
def a_ ( self , __snake_case ):
if isinstance(__snake_case , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a_ ( self , __snake_case , __snake_case=False ):
if isinstance(__snake_case , __snake_case ):
snake_case = value
_SCREAMING_SNAKE_CASE : Optional[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_SCREAMING_SNAKE_CASE : Optional[int] = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , snake_case__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__(__snake_case )
snake_case = config
snake_case = RegNetEmbeddings(__snake_case )
snake_case = RegNetEncoder(__snake_case )
snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ ( self , __snake_case , __snake_case = None , __snake_case = None ):
snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case = return_dict if return_dict is not None else self.config.use_return_dict
snake_case = self.embedder(__snake_case )
snake_case = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case )
snake_case = encoder_outputs[0]
snake_case = self.pooler(__snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , snake_case__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__(__snake_case )
snake_case = config.num_labels
snake_case = RegNetModel(__snake_case )
# classification head
snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ ( self , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , ):
snake_case = return_dict if return_dict is not None else self.config.use_return_dict
snake_case = self.regnet(__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case )
snake_case = outputs.pooler_output if return_dict else outputs[1]
snake_case = self.classifier(__snake_case )
snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case = '''single_label_classification'''
else:
snake_case = '''multi_label_classification'''
if self.config.problem_type == "regression":
snake_case = MSELoss()
if self.num_labels == 1:
snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case = loss_fct(__snake_case , __snake_case )
elif self.config.problem_type == "single_label_classification":
snake_case = CrossEntropyLoss()
snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case = BCEWithLogitsLoss()
snake_case = loss_fct(__snake_case , __snake_case )
if not return_dict:
snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
| 550
|
import os
from distutils.util import strtobool
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
for e in env_keys:
snake_case = int(os.environ.get(UpperCamelCase_ ,-1 ) )
if val >= 0:
return val
return default
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_=False ):
"""simple docstring"""
snake_case = os.environ.get(UpperCamelCase_ ,str(UpperCamelCase_ ) )
return strtobool(UpperCamelCase_ ) == 1 # As its name indicates `strtobool` actually returns an int...
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_="no" ):
"""simple docstring"""
snake_case = os.environ.get(UpperCamelCase_ ,str(UpperCamelCase_ ) )
return value
| 550
| 1
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Any=7 , __lowerCamelCase : str=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=True , __lowerCamelCase : int=99 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Any=4 , __lowerCamelCase : Dict=None , ) -> int:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = embedding_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
def lowercase_ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Dict ) -> Any:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def lowercase_ ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = MobileBertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase_ ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = MobileBertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = MobileBertForNextSentencePrediction(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase_ ( self : str , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any ) -> int:
SCREAMING_SNAKE_CASE__ = MobileBertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , next_sentence_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = MobileBertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = MobileBertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = MobileBertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = MobileBertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
),(
SCREAMING_SNAKE_CASE__
),(
SCREAMING_SNAKE_CASE__
),(
SCREAMING_SNAKE_CASE__
),(
SCREAMING_SNAKE_CASE__
),(
SCREAMING_SNAKE_CASE__
),(
SCREAMING_SNAKE_CASE__
),
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
a = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
a = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a = True
def lowercase_ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int]=False ) -> str:
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def lowercase_ ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = MobileBertModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def lowercase_ ( self : Any ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCamelCase )
def lowercase_ ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCamelCase )
def lowercase_ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCamelCase )
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCamelCase )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return torch.tensor(
_A , dtype=torch.long , device=_A , )
_SCREAMING_SNAKE_CASE : Optional[Any] = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE__ = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[
[-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
[-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
[2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
]
] , device=__lowerCamelCase , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
SCREAMING_SNAKE_CASE__ = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
SCREAMING_SNAKE_CASE__ = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 472
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for data in source_data:
for i, el in enumerate(_A ):
if len(_A ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_A ) )
return data_lists
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for dlist, weight in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = min(_A )
SCREAMING_SNAKE_CASE__ = max(_A )
SCREAMING_SNAKE_CASE__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
SCREAMING_SNAKE_CASE__ = F'''Invalid weight of {weight:f} provided'''
raise ValueError(_A )
score_lists.append(_A )
return score_lists
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_A ):
SCREAMING_SNAKE_CASE__ = final_scores[j] + ele
return final_scores
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_data(_A )
SCREAMING_SNAKE_CASE__ = calculate_each_score(_A , _A )
SCREAMING_SNAKE_CASE__ = generate_final_scores(_A )
# append scores to source data
for i, ele in enumerate(_A ):
source_data[i].append(_A )
return source_data
| 472
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
lowercase_ : List[Any] = StableDiffusionInpaintPipeline
lowercase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowercase_ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ : Optional[int] = frozenset([] )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase :List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , )
lowerCAmelCase :Optional[int] = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
torch.manual_seed(0 )
lowerCAmelCase :str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
lowerCAmelCase :Any = CLIPTextModel(UpperCAmelCase )
lowerCAmelCase :List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase :Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int=0 ) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowerCAmelCase :Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
lowerCAmelCase :Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase :int = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
lowerCAmelCase :Any = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(UpperCAmelCase ).startswith('mps' ):
lowerCAmelCase :Dict = torch.manual_seed(UpperCAmelCase )
else:
lowerCAmelCase :Union[str, Any] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCAmelCase :str = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
lowerCAmelCase :Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase :Union[str, Any] = self.get_dummy_components()
lowerCAmelCase :str = StableDiffusionInpaintPipeline(**UpperCAmelCase )
lowerCAmelCase :str = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowerCAmelCase :Dict = self.get_dummy_inputs(UpperCAmelCase )
lowerCAmelCase :str = sd_pipe(**UpperCAmelCase ).images
lowerCAmelCase :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase :Dict = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
lowerCAmelCase :Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase :Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase :Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
lowerCAmelCase :str = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase :List[str] = StableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase :Tuple = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase :Tuple = torch.manual_seed(0 )
lowerCAmelCase :str = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , output_type='np' , )
lowerCAmelCase :Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def UpperCAmelCase__ ( self : str ) -> List[str]:
lowerCAmelCase :Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase :Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase :int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
lowerCAmelCase :Tuple = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase :int = StableDiffusionInpaintPipeline.from_pretrained(
UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase :Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase :Any = torch.manual_seed(0 )
lowerCAmelCase :Dict = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , output_type='np' , )
lowerCAmelCase :List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase :Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase :int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase :Tuple = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase :List[Any] = PNDMScheduler.from_pretrained(UpperCAmelCase , subfolder='scheduler' )
lowerCAmelCase :List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , scheduler=UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase :Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase :Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase :Union[str, Any] = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase :Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 553
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 553
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : Any = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def _lowerCAmelCase(a : Union[str, Any] ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE =torch.load(__UpperCamelCase , map_location='''cpu''' )
return sd
def _lowerCAmelCase(a : Tuple , a : Union[str, Any] , a : Tuple=rename_keys_prefix ) -> int:
_SCREAMING_SNAKE_CASE =OrderedDict()
_SCREAMING_SNAKE_CASE =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_SCREAMING_SNAKE_CASE =key
for name_pair in rename_keys_prefix:
_SCREAMING_SNAKE_CASE =new_key.replace(name_pair[0] , name_pair[1] )
_SCREAMING_SNAKE_CASE =d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_SCREAMING_SNAKE_CASE =new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def _lowerCAmelCase(a : int , a : List[str] ) -> str:
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
_SCREAMING_SNAKE_CASE ='''pretraining'''
if "vcr" in checkpoint_path:
_SCREAMING_SNAKE_CASE ={'''visual_embedding_dim''': 512}
elif "vqa_advanced" in checkpoint_path:
_SCREAMING_SNAKE_CASE ={'''visual_embedding_dim''': 2048}
elif "vqa" in checkpoint_path:
_SCREAMING_SNAKE_CASE ={'''visual_embedding_dim''': 2048}
elif "nlvr" in checkpoint_path:
_SCREAMING_SNAKE_CASE ={'''visual_embedding_dim''': 1024}
else:
raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
_SCREAMING_SNAKE_CASE ={'''visual_embedding_dim''': 512}
_SCREAMING_SNAKE_CASE ='''multichoice'''
elif "vqa_advanced" in checkpoint_path:
_SCREAMING_SNAKE_CASE ={'''visual_embedding_dim''': 2048}
_SCREAMING_SNAKE_CASE ='''vqa_advanced'''
elif "vqa" in checkpoint_path:
_SCREAMING_SNAKE_CASE ={'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
_SCREAMING_SNAKE_CASE ='''vqa'''
elif "nlvr" in checkpoint_path:
_SCREAMING_SNAKE_CASE ={
'''visual_embedding_dim''': 1024,
'''num_labels''': 2,
}
_SCREAMING_SNAKE_CASE ='''nlvr'''
_SCREAMING_SNAKE_CASE =VisualBertConfig(**__UpperCamelCase )
# Load State Dict
_SCREAMING_SNAKE_CASE =load_state_dict(__UpperCamelCase )
_SCREAMING_SNAKE_CASE =get_new_dict(__UpperCamelCase , __UpperCamelCase )
if model_type == "pretraining":
_SCREAMING_SNAKE_CASE =VisualBertForPreTraining(__UpperCamelCase )
elif model_type == "vqa":
_SCREAMING_SNAKE_CASE =VisualBertForQuestionAnswering(__UpperCamelCase )
elif model_type == "nlvr":
_SCREAMING_SNAKE_CASE =VisualBertForVisualReasoning(__UpperCamelCase )
elif model_type == "multichoice":
_SCREAMING_SNAKE_CASE =VisualBertForMultipleChoice(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Save Checkpoints
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCAmelCase_ : str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 711
|
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : List[Any] = 2_5_6
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Any = ["melgan"]
def __init__( self , _A , _A , _A , _A , _A , ):
'''simple docstring'''
super().__init__()
# From MELGAN
_SCREAMING_SNAKE_CASE =math.log(1E-5 ) # Matches MelGAN training.
_SCREAMING_SNAKE_CASE =4.0 # Largest value for most examples
_SCREAMING_SNAKE_CASE =1_2_8
self.register_modules(
notes_encoder=_A , continuous_encoder=_A , decoder=_A , scheduler=_A , melgan=_A , )
def UpperCamelCase_ ( self , _A , _A=(-1.0, 1.0) , _A=False ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output_range
if clip:
_SCREAMING_SNAKE_CASE =torch.clip(_A , self.min_value , self.max_value )
# Scale to [0, 1].
_SCREAMING_SNAKE_CASE =(features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCamelCase_ ( self , _A , _A=(-1.0, 1.0) , _A=False ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =input_range
_SCREAMING_SNAKE_CASE =torch.clip(_A , _A , _A ) if clip else outputs
# Scale to [0, 1].
_SCREAMING_SNAKE_CASE =(outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCamelCase_ ( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =input_tokens > 0
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.notes_encoder(
encoder_input_tokens=_A , encoder_inputs_mask=_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.continuous_encoder(
encoder_inputs=_A , encoder_inputs_mask=_A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCamelCase_ ( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =noise_time
if not torch.is_tensor(_A ):
_SCREAMING_SNAKE_CASE =torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_SCREAMING_SNAKE_CASE =timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_SCREAMING_SNAKE_CASE =timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_SCREAMING_SNAKE_CASE =self.decoder(
encodings_and_masks=_A , decoder_input_tokens=_A , decoder_noise_time=_A )
return logits
@torch.no_grad()
def __call__( self , _A , _A = None , _A = 1_0_0 , _A = True , _A = "numpy" , _A = None , _A = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
_SCREAMING_SNAKE_CASE =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =np.zeros([1, 0, self.n_dims] , np.floataa )
_SCREAMING_SNAKE_CASE =torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
for i, encoder_input_tokens in enumerate(_A ):
if i == 0:
_SCREAMING_SNAKE_CASE =torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_SCREAMING_SNAKE_CASE =torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_SCREAMING_SNAKE_CASE =ones
_SCREAMING_SNAKE_CASE =self.scale_features(
_A , output_range=[-1.0, 1.0] , clip=_A )
_SCREAMING_SNAKE_CASE =self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_A , continuous_mask=_A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_SCREAMING_SNAKE_CASE =randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_SCREAMING_SNAKE_CASE =self.decode(
encodings_and_masks=_A , input_tokens=_A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_SCREAMING_SNAKE_CASE =self.scheduler.step(_A , _A , _A , generator=_A ).prev_sample
_SCREAMING_SNAKE_CASE =self.scale_to_features(_A , input_range=[-1.0, 1.0] )
_SCREAMING_SNAKE_CASE =mel[:1]
_SCREAMING_SNAKE_CASE =mel.cpu().float().numpy()
_SCREAMING_SNAKE_CASE =np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A )
logger.info('''Generated segment''' , _A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
_SCREAMING_SNAKE_CASE =self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_SCREAMING_SNAKE_CASE =full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_A )
| 165
| 0
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir("fixtures/test_sentencepiece.model")
a = {"target_lang": "fi", "source_lang": "en"}
a = ">>zh<<"
a = "Helsinki-NLP/"
if is_torch_available():
a = "pt"
elif is_tf_available():
a = "tf"
else:
a = "jax"
@require_sentencepiece
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : List[str] = MarianTokenizer
__UpperCamelCase : List[str] = False
__UpperCamelCase : int = True
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase ,range(len(lowerCamelCase ) ) ) )
__SCREAMING_SNAKE_CASE = Path(self.tmpdirname )
save_json(lowerCamelCase ,save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowerCamelCase ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCamelCase ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowerCamelCase ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : str ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """</s>"""
__SCREAMING_SNAKE_CASE = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) ,lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""</s>""" )
self.assertEqual(vocab_keys[1] ,"""<unk>""" )
self.assertEqual(vocab_keys[-1] ,"""<pad>""" )
self.assertEqual(len(lowerCamelCase ) ,9 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,9 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
__SCREAMING_SNAKE_CASE = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowerCamelCase ,batch.input_ids[0] )
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = [x.name for x in Path(lowerCamelCase ).glob("""*""" )]
self.assertIn("""source.spm""" ,lowerCamelCase )
MarianTokenizer.from_pretrained(lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] ,padding=lowerCamelCase ,truncation=lowerCamelCase ,return_tensors=lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
self.assertEqual(batch.input_ids.shape ,(2, 512) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=lowerCamelCase ,return_tensors=lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__SCREAMING_SNAKE_CASE = """Tämä on testi"""
__SCREAMING_SNAKE_CASE = """This is a test"""
__SCREAMING_SNAKE_CASE = [76, 7, 2047, 2]
__SCREAMING_SNAKE_CASE = [69, 12, 11, 940, 2]
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ).input_ids
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer(text_target=lowerCamelCase ).input_ids
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCamelCase ,skip_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
| 109
|
'''simple docstring'''
import os
import sys
_SCREAMING_SNAKE_CASE = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
_SCREAMING_SNAKE_CASE = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def lowerCamelCase( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) -> int:
return AutoConfig.from_pretrained(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def lowerCamelCase( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) -> List[str]:
return AutoTokenizer.from_pretrained(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
@add_start_docstrings(AutoModel.__doc__ )
def lowerCamelCase( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return AutoModel.from_pretrained(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def lowerCamelCase( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
return AutoModelForCausalLM.from_pretrained(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def lowerCamelCase( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) -> Dict:
return AutoModelForMaskedLM.from_pretrained(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def lowerCamelCase( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return AutoModelForSequenceClassification.from_pretrained(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def lowerCamelCase( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) -> str:
return AutoModelForQuestionAnswering.from_pretrained(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
| 366
| 0
|
import os
def SCREAMING_SNAKE_CASE( ) -> str:
with open(os.path.dirname(__UpperCamelCase ) + "/p022_names.txt" ) as file:
a__ : Optional[Any] = str(file.readlines()[0] )
a__ : Optional[int] = names.replace("\"" , "" ).split("," )
names.sort()
a__ : List[str] = 0
a__ : List[Any] = 0
for i, name in enumerate(__UpperCamelCase ):
for letter in name:
name_score += ord(__UpperCamelCase ) - 64
total_score += (i + 1) * name_score
a__ : Optional[Any] = 0
return total_score
if __name__ == "__main__":
print(solution())
| 207
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCamelCase = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _a ( unittest.TestCase ):
'''simple docstring'''
def _A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None ):
"""simple docstring"""
a__ : Optional[Any] = None
a__ : Dict = os.path.abspath(os.path.join("examples" , "by_feature" ) )
a__ : Tuple = os.path.abspath("examples" )
for item in os.listdir(__UpperCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
a__ : Optional[int] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if os.path.isfile(__UpperCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=__UpperCAmelCase , feature_script=__UpperCAmelCase , tested_section="main()" if parser_only else "training_function()" , ):
a__ : List[Any] = compare_against_test(
os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a__ : Optional[Any] = "\n".join(__UpperCAmelCase )
if special_strings is not None:
for string in special_strings:
a__ : Optional[Any] = diff.replace(__UpperCAmelCase , "" )
self.assertEqual(__UpperCAmelCase , "" )
def _A ( self ):
"""simple docstring"""
self.one_complete_example("complete_nlp_example.py" , __UpperCAmelCase )
self.one_complete_example("complete_nlp_example.py" , __UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
a__ : List[Any] = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
a__ : int = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.one_complete_example("complete_cv_example.py" , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :int = False
@classmethod
def _A ( cls ):
"""simple docstring"""
super().setUpClass()
a__ : Optional[Any] = tempfile.mkdtemp()
a__ : Tuple = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
a__ : Any = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _A ( cls ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _A ( self ):
"""simple docstring"""
a__ : Dict = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def _A ( self ):
"""simple docstring"""
a__ : Union[str, Any] = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
a__ : Optional[int] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def _A ( self ):
"""simple docstring"""
a__ : Dict = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
a__ : List[str] = run_command(self._launch_args + testargs , return_stdout=__UpperCAmelCase )
self.assertNotIn("epoch 0:" , __UpperCAmelCase )
self.assertIn("epoch 1:" , __UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
a__ : List[Any] = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
a__ : List[str] = run_command(self._launch_args + testargs , return_stdout=__UpperCAmelCase )
if torch.cuda.is_available():
a__ : List[str] = torch.cuda.device_count()
else:
a__ : str = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , __UpperCAmelCase )
self.assertIn("epoch 1:" , __UpperCAmelCase )
else:
self.assertIn("epoch 0:" , __UpperCAmelCase )
self.assertIn("epoch 1:" , __UpperCAmelCase )
@slow
def _A ( self ):
"""simple docstring"""
a__ : List[str] = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
a__ : Any = run_command(self._launch_args + testargs , return_stdout=__UpperCAmelCase )
a__ : List[str] = re.findall("({.+})" , __UpperCAmelCase )
a__ : str = [r for r in results if "accuracy" in r][-1]
a__ : Optional[int] = ast.literal_eval(__UpperCAmelCase )
self.assertGreaterEqual(results["accuracy"] , 0.7_5 )
def _A ( self ):
"""simple docstring"""
a__ : Any = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
a__ : Tuple = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , "tracking" ) ) )
def _A ( self ):
"""simple docstring"""
a__ : List[str] = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def _A ( self ):
"""simple docstring"""
a__ : Optional[int] = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 207
| 1
|
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class lowercase__ ( snake_case_ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__="" , lowerCamelCase__="train" ):
'''simple docstring'''
assert os.path.isdir(lowerCamelCase__ )
UpperCamelCase = []
UpperCamelCase = os.listdir(lowerCamelCase__ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
UpperCamelCase = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if not os.path.isfile(lowerCamelCase__ ):
continue
self.documents.append(lowerCamelCase__ )
def __len__( self ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = self.documents[idx]
UpperCamelCase = document_path.split('''/''' )[-1]
with open(lowerCamelCase__ , encoding='''utf-8''' ) as source:
UpperCamelCase = source.read()
UpperCamelCase , UpperCamelCase = process_story(lowerCamelCase__ )
return document_name, story_lines, summary_lines
def __snake_case ( _UpperCAmelCase : Union[str, Any]):
UpperCamelCase = list(filter(lambda _UpperCAmelCase: len(_UpperCAmelCase) != 0, [line.strip() for line in raw_story.split('''\n''')]))
# for some unknown reason some lines miss a period, add it
UpperCamelCase = [_add_missing_period(_UpperCAmelCase) for line in nonempty_lines]
# gather article lines
UpperCamelCase = []
UpperCamelCase = deque(_UpperCAmelCase)
while True:
try:
UpperCamelCase = lines.popleft()
if element.startswith('''@highlight'''):
break
story_lines.append(_UpperCAmelCase)
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
UpperCamelCase = list(filter(lambda _UpperCAmelCase: not t.startswith('''@highlight'''), _UpperCAmelCase))
return story_lines, summary_lines
def __snake_case ( _UpperCAmelCase : int):
UpperCamelCase = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')''']
if line.startswith('''@highlight'''):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __snake_case ( _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any):
if len(_UpperCAmelCase) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_UpperCAmelCase)))
return sequence
def __snake_case ( _UpperCAmelCase : Tuple, _UpperCAmelCase : List[str]):
UpperCamelCase = torch.ones_like(_UpperCAmelCase)
UpperCamelCase = sequence == pad_token_id
UpperCamelCase = 0
return mask
def __snake_case ( _UpperCAmelCase : List[Any], _UpperCAmelCase : int, _UpperCAmelCase : Any):
UpperCamelCase = [tokenizer.encode(_UpperCAmelCase) for line in story_lines]
UpperCamelCase = [token for sentence in story_lines_token_ids for token in sentence]
UpperCamelCase = [tokenizer.encode(_UpperCAmelCase) for line in summary_lines]
UpperCamelCase = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __snake_case ( _UpperCAmelCase : List[str], _UpperCAmelCase : str):
UpperCamelCase = []
for sequence in batch:
UpperCamelCase = -1
UpperCamelCase = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2)
batch_embeddings.append(_UpperCAmelCase)
return torch.tensor(_UpperCAmelCase)
| 212
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''', [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1337, num_examples=42, dataset_name='''my_dataset''')}),
SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1337, num_examples=42)}),
SplitDict({'''train''': SplitInfo()}),
], )
def __snake_case ( _UpperCAmelCase : SplitDict):
UpperCamelCase = split_dict._to_yaml_list()
assert len(_UpperCAmelCase) == len(_UpperCAmelCase)
UpperCamelCase = SplitDict._from_yaml_list(_UpperCAmelCase)
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
UpperCamelCase = None
# the split name of split_dict takes over the name of the split info object
UpperCamelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''', [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase), SplitInfo(dataset_name='''my_dataset''')])
def __snake_case ( _UpperCAmelCase : Dict):
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
UpperCamelCase = asdict(SplitDict({'''train''': split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 212
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
A_ : Tuple = logging.get_logger(__name__)
def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ):
'''simple docstring'''
a = r"\w+[.]\d+"
a = re.findall(UpperCAmelCase__ , UpperCAmelCase__ )
for pat in pats:
a = key.replace(UpperCAmelCase__ , "_".join(pat.split("." ) ) )
return key
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Union[str, Any] ):
'''simple docstring'''
a = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
a = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
a = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
a = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
a = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
a = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
a = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
a = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
a = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
a = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase__ ( UpperCAmelCase__ :Dict , UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :Union[str, Any]=42 ):
'''simple docstring'''
a = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
a = flax_model.init_weights(PRNGKey(UpperCAmelCase__ ) )
a = flatten_dict(UpperCAmelCase__ )
a = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a = rename_key(UpperCAmelCase__ )
a = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
a , a = rename_key_and_reshape_tensor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
a = jnp.asarray(UpperCAmelCase__ )
return unflatten_dict(UpperCAmelCase__ )
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Optional[int] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 32
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Dict = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowercase ( __UpperCAmelCase ):
lowerCamelCase : Any = '''cvt'''
def __init__(self , A=3 , A=[7, 3, 3] , A=[4, 2, 2] , A=[2, 1, 1] , A=[6_4, 1_9_2, 3_8_4] , A=[1, 3, 6] , A=[1, 2, 1_0] , A=[4.0, 4.0, 4.0] , A=[0.0, 0.0, 0.0] , A=[0.0, 0.0, 0.0] , A=[0.0, 0.0, 0.1] , A=[True, True, True] , A=[False, False, True] , A=["dw_bn", "dw_bn", "dw_bn"] , A=[3, 3, 3] , A=[1, 1, 1] , A=[2, 2, 2] , A=[1, 1, 1] , A=[1, 1, 1] , A=0.02 , A=1E-12 , **A , ):
super().__init__(**lowerCamelCase__ )
lowerCamelCase_ : Dict = num_channels
lowerCamelCase_ : Optional[Any] = patch_sizes
lowerCamelCase_ : List[str] = patch_stride
lowerCamelCase_ : str = patch_padding
lowerCamelCase_ : Optional[int] = embed_dim
lowerCamelCase_ : Tuple = num_heads
lowerCamelCase_ : Optional[int] = depth
lowerCamelCase_ : Optional[int] = mlp_ratio
lowerCamelCase_ : Union[str, Any] = attention_drop_rate
lowerCamelCase_ : int = drop_rate
lowerCamelCase_ : Dict = drop_path_rate
lowerCamelCase_ : Dict = qkv_bias
lowerCamelCase_ : str = cls_token
lowerCamelCase_ : Any = qkv_projection_method
lowerCamelCase_ : int = kernel_qkv
lowerCamelCase_ : Dict = padding_kv
lowerCamelCase_ : Optional[int] = stride_kv
lowerCamelCase_ : List[str] = padding_q
lowerCamelCase_ : Any = stride_q
lowerCamelCase_ : int = initializer_range
lowerCamelCase_ : int = layer_norm_eps
| 422
|
"""simple docstring"""
import random
def _lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : str ):
lowercase__ : List[Any] = a[left_index]
lowercase__ : List[Any] = left_index + 1
for j in range(left_index + 1 , lowerCamelCase__ ):
if a[j] < pivot:
lowercase__ , lowercase__ : Dict = a[i], a[j]
i += 1
lowercase__ , lowercase__ : Any = a[i - 1], a[left_index]
return i - 1
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ):
if left < right:
lowercase__ : str = random.randint(lowerCamelCase__ , right - 1 )
lowercase__ , lowercase__ : Any = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
lowercase__ : str = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
quick_sort_random(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowerCamelCase__ , pivot_index + 1 , lowerCamelCase__ ) # recursive quicksort to the right of the pivot point
def _lowerCamelCase ( ):
lowercase__ : List[Any] = input("""Enter numbers separated by a comma:\n""" ).strip()
lowercase__ : int = [int(lowerCamelCase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) )
print(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 200
| 0
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
UpperCAmelCase__ : str = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def _A ( _UpperCamelCase ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
_UpperCAmelCase : Any = list(s_dict.keys() )
for key in keys:
_UpperCAmelCase : Optional[Any] = R'''.*/layers_(\d+)'''
_UpperCAmelCase : Optional[int] = key
if re.match(_UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase : Any = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , _UpperCamelCase )
_UpperCAmelCase : Union[str, Any] = R'''(encoder|decoder)\/'''
if re.match(_UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase : List[str] = re.match(_UpperCamelCase , _UpperCamelCase ).groups()
if groups[0] == "encoder":
_UpperCAmelCase : int = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , _UpperCamelCase )
_UpperCAmelCase : str = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , _UpperCamelCase )
elif groups[0] == "decoder":
_UpperCAmelCase : Dict = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , _UpperCamelCase )
_UpperCAmelCase : Union[str, Any] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , _UpperCamelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_UpperCAmelCase : Optional[Any] = new_key.replace(_UpperCamelCase , _UpperCamelCase )
print(F'''{key} -> {new_key}''' )
_UpperCAmelCase : Tuple = s_dict.pop(_UpperCamelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCAmelCase : List[str] = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCAmelCase : Optional[int] = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_UpperCAmelCase : Any = s_dict[key].shape[0]
_UpperCAmelCase : Any = s_dict[key]
for idx in range(_UpperCamelCase ):
_UpperCAmelCase : List[str] = expert_weihts[idx]
print(F'''{key} -> {key.replace('expert/' , 'nested fstring' )}''' )
s_dict.pop(_UpperCamelCase )
return s_dict
UpperCAmelCase__ : List[str] = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def _A ( _UpperCamelCase , _UpperCamelCase ):
# Convert a google style config to the hugging face fromat
import regex as re
with open(_UpperCamelCase , '''r''' ) as f:
_UpperCAmelCase : Dict = f.read()
_UpperCAmelCase : Optional[int] = re.findall(R'''(.*) = ([0-9.]*)''' , _UpperCamelCase )
_UpperCAmelCase : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_UpperCAmelCase : int = float(_UpperCamelCase ) if '''.''' in value else int(_UpperCamelCase )
_UpperCAmelCase : Dict = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , _UpperCamelCase )[0]
_UpperCAmelCase : Optional[int] = str(activation[1] )
_UpperCAmelCase : Tuple = num_experts
_UpperCAmelCase : Optional[int] = SwitchTransformersConfig(**_UpperCamelCase )
return config
def _A ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase="./" , _UpperCamelCase=8 ):
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_UpperCAmelCase : List[str] = checkpoints.load_tax_checkpoint(_UpperCamelCase )
if gin_file is not None:
_UpperCAmelCase : Optional[Any] = convert_gin_to_config(_UpperCamelCase , _UpperCamelCase )
else:
_UpperCAmelCase : Tuple = SwitchTransformersConfig.from_pretrained(_UpperCamelCase )
_UpperCAmelCase : Tuple = SwitchTransformersForConditionalGeneration(_UpperCamelCase )
_UpperCAmelCase : List[str] = flax_params['''target''']
_UpperCAmelCase : Any = flatten_dict(_UpperCamelCase , sep='''/''' )
_UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase )
_UpperCAmelCase : Optional[Any] = unflatten_dict(_UpperCamelCase , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
UpperCAmelCase__ : Union[str, Any] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 416
|
def _A ( _UpperCamelCase , _UpperCamelCase ):
return number | (1 << position)
def _A ( _UpperCamelCase , _UpperCamelCase ):
return number & ~(1 << position)
def _A ( _UpperCamelCase , _UpperCamelCase ):
return number ^ (1 << position)
def _A ( _UpperCamelCase , _UpperCamelCase ):
return ((number >> position) & 1) == 1
def _A ( _UpperCamelCase , _UpperCamelCase ):
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 416
| 1
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowercase__( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Union[str, Any] = RoFormerTokenizer
a :str = RoFormerTokenizerFast
a :List[str] = True
a :Optional[Any] = True
def _lowercase ( self : Optional[Any] ) -> int:
super().setUp()
def _lowercase ( self : Dict , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Any:
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> str:
lowercase_ = '''永和服装饰品有限公司,今天天气非常好'''
lowercase_ = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = self.get_tokenizer()
lowercase_ , lowercase_ = self.get_chinese_input_output_texts()
lowercase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , output_text.split() )
lowercase_ = tokens + [tokenizer.unk_token]
lowercase_ = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str ) -> str:
lowercase_ = self.get_rust_tokenizer()
lowercase_ , lowercase_ = self.get_chinese_input_output_texts()
lowercase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , output_text.split() )
lowercase_ = tokens + [tokenizer.unk_token]
lowercase_ = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> str:
pass
def _lowercase ( self : Optional[int] ) -> List[Any]:
pass
def _lowercase ( self : Optional[Any] ) -> List[Any]:
pass
| 97
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase__ =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class A__:
lowerCAmelCase = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''The column name of the images in the files.'''} )
lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''A folder containing the training data.'''} )
lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''A folder containing the validation data.'''} )
lowerCAmelCase = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _a ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
if self.train_dir is not None:
__SCREAMING_SNAKE_CASE = self.train_dir
if self.validation_dir is not None:
__SCREAMING_SNAKE_CASE = self.validation_dir
__SCREAMING_SNAKE_CASE = data_files if data_files else None
@dataclass
class A__:
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
lowerCAmelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowerCAmelCase = field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class A__( __magic_name__ ):
lowerCAmelCase = field(
default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def _a ( UpperCAmelCase__ ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _a ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , UpperCAmelCase__ , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase__ )
transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__SCREAMING_SNAKE_CASE = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
__SCREAMING_SNAKE_CASE = ds['''train'''].train_test_split(data_args.train_val_split )
__SCREAMING_SNAKE_CASE = split['''train''']
__SCREAMING_SNAKE_CASE = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
__SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(UpperCAmelCase__ )
if training_args.do_train:
__SCREAMING_SNAKE_CASE = ds['''train'''].column_names
else:
__SCREAMING_SNAKE_CASE = ds['''validation'''].column_names
if data_args.image_column_name is not None:
__SCREAMING_SNAKE_CASE = data_args.image_column_name
elif "image" in column_names:
__SCREAMING_SNAKE_CASE = '''image'''
elif "img" in column_names:
__SCREAMING_SNAKE_CASE = '''img'''
else:
__SCREAMING_SNAKE_CASE = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__SCREAMING_SNAKE_CASE = image_processor.size['''shortest_edge''']
else:
__SCREAMING_SNAKE_CASE = (image_processor.size['''height'''], image_processor.size['''width'''])
__SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCAmelCase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCAmelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCAmelCase__ )
# Compute absolute learning rate
__SCREAMING_SNAKE_CASE = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__SCREAMING_SNAKE_CASE = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics('''eval''' , UpperCAmelCase__ )
trainer.save_metrics('''eval''' , UpperCAmelCase__ )
# Write model card and (optionally) push to hub
__SCREAMING_SNAKE_CASE = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase__ )
else:
trainer.create_model_card(**UpperCAmelCase__ )
def _a ( UpperCAmelCase__ ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 482
| 0
|
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_snake_case = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
_snake_case = get_tests_dir('''fixtures/vocab.json''')
_snake_case = get_tests_dir('''fixtures''')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def __lowerCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
_lowerCAmelCase = 0
def __lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_lowerCAmelCase = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__lowercase , __lowercase )
def __lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = WavaVecaConfig()
_lowerCAmelCase = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
_lowerCAmelCase = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __lowerCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__lowercase , os.path.join(__lowercase , __lowercase ) )
copyfile(__lowercase , os.path.join(__lowercase , 'vocab.json' ) )
_lowerCAmelCase = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __lowerCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = WavaVecaFeatureExtractor()
_lowerCAmelCase = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
_lowerCAmelCase = WavaVecaProcessor(__lowercase , __lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in tokenizer
with open(os.path.join(__lowercase , __lowercase ) , 'r' ) as f:
_lowerCAmelCase = json.load(__lowercase )
config_dict.pop('processor_class' )
with open(os.path.join(__lowercase , __lowercase ) , 'w' ) as f:
f.write(json.dumps(__lowercase ) )
_lowerCAmelCase = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __lowerCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = WavaVecaFeatureExtractor()
_lowerCAmelCase = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
_lowerCAmelCase = WavaVecaProcessor(__lowercase , __lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in feature extractor
with open(os.path.join(__lowercase , __lowercase ) , 'r' ) as f:
_lowerCAmelCase = json.load(__lowercase )
config_dict.pop('processor_class' )
with open(os.path.join(__lowercase , __lowercase ) , 'w' ) as f:
f.write(json.dumps(__lowercase ) )
_lowerCAmelCase = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __lowerCamelCase ( self : Dict ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(__lowercase )
# copy relevant files
copyfile(__lowercase , os.path.join(__lowercase , 'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(__lowercase , __lowercase ) , 'w' ) as f:
f.write('{}' )
_lowerCAmelCase = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(__lowercase ):
_lowerCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
_lowerCAmelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__lowercase )
_lowerCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=__lowercase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
_lowerCAmelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
_lowerCAmelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_lowerCAmelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__lowercase , use_fast=__lowercase )
_lowerCAmelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def __lowerCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
try:
AutoConfig.register('custom' , __lowercase )
AutoFeatureExtractor.register(__lowercase , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoProcessor.register(__lowercase , __lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase = CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = os.path.join(__lowercase , 'vocab.txt' )
with open(__lowercase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase = CustomTokenizer(__lowercase )
_lowerCAmelCase = CustomProcessor(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__lowercase )
_lowerCAmelCase = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
class _SCREAMING_SNAKE_CASE ( lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
class _SCREAMING_SNAKE_CASE ( lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict = False
class _SCREAMING_SNAKE_CASE ( lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] = """AutoFeatureExtractor"""
SCREAMING_SNAKE_CASE_: List[str] = """AutoTokenizer"""
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
try:
AutoConfig.register('custom' , __lowercase )
AutoFeatureExtractor.register(__lowercase , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase , __lowercase )
# If remote code is not set, the default is to use local classes.
_lowerCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_lowerCAmelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_lowerCAmelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def __lowerCamelCase ( self : str ) -> Any:
"""simple docstring"""
_lowerCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def __lowerCamelCase ( cls : Union[str, Any] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = TOKEN
HfFolder.save_token(__lowercase )
@classmethod
def __lowerCamelCase ( cls : int ) -> Optional[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
def __lowerCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase , 'test-processor' ) , push_to_hub=__lowercase , use_auth_token=self._token )
_lowerCAmelCase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(new_processor.feature_extractor , __lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase , 'test-processor-org' ) , push_to_hub=__lowercase , use_auth_token=self._token , organization='valid_org' , )
_lowerCAmelCase = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase , getattr(new_processor.feature_extractor , __lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_lowerCAmelCase = CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = os.path.join(__lowercase , 'vocab.txt' )
with open(__lowercase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase = CustomTokenizer(__lowercase )
_lowerCAmelCase = CustomProcessor(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
_lowerCAmelCase = Repository(__lowercase , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(__lowercase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__lowercase , 'tokenizer_config.json' ) ) as f:
_lowerCAmelCase = json.load(__lowercase )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__lowercase , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase , 'custom_processing.py' ) ) )
repo.push_to_hub()
_lowerCAmelCase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=__lowercase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
| 715
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=99 , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : List[Any]=64 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Tuple=None , ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def __lowerCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def __lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MPNetForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = MPNetForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MPNetForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_: int = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_: Optional[Any] = False
SCREAMING_SNAKE_CASE_: Dict = True
def __lowerCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def __lowerCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_ )
def __lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase_ )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_lowerCAmelCase = model(UpperCAmelCase_ )[0]
_lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_lowerCAmelCase = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 491
| 0
|
from manim import *
class __snake_case ( SCREAMING_SNAKE_CASE ):
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = Rectangle(height=0.5 ,width=0.5 )
lowerCAmelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ = Rectangle(height=0.25 ,width=0.25 )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ ,buff=0 )
lowerCAmelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ ,buff=0 )
lowerCAmelCase__ = VGroup(snake_case_ ,snake_case_ ).arrange(snake_case_ ,buff=0 )
lowerCAmelCase__ = Text('CPU' ,font_size=24 )
lowerCAmelCase__ = Group(snake_case_ ,snake_case_ ).arrange(snake_case_ ,buff=0.5 ,aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
lowerCAmelCase__ = [mem.copy() for i in range(4 )]
lowerCAmelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ ,buff=0 )
lowerCAmelCase__ = Text('GPU' ,font_size=24 )
lowerCAmelCase__ = Group(snake_case_ ,snake_case_ ).arrange(snake_case_ ,buff=0.5 ,aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ ,buff=0 )
lowerCAmelCase__ = Text('Model' ,font_size=24 )
lowerCAmelCase__ = Group(snake_case_ ,snake_case_ ).arrange(snake_case_ ,buff=0.5 ,aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i, rect in enumerate(snake_case_ ):
lowerCAmelCase__ = fill.copy().set_fill(snake_case_ ,opacity=0.8 )
target.move_to(snake_case_ )
model_arr.append(snake_case_ )
lowerCAmelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(snake_case_ )
self.add(*snake_case_ ,*snake_case_ )
lowerCAmelCase__ = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ ,buff=0 )
lowerCAmelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ ,buff=0 )
lowerCAmelCase__ = VGroup(snake_case_ ,snake_case_ ).arrange(snake_case_ ,buff=0 )
lowerCAmelCase__ = Text('Disk' ,font_size=24 )
lowerCAmelCase__ = Group(snake_case_ ,snake_case_ ).arrange(snake_case_ ,buff=0.5 ,aligned_edge=snake_case_ )
disk.move_to([-4, -1.25, 0] )
self.add(snake_case_ ,snake_case_ )
lowerCAmelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ ,snake_case_ )
lowerCAmelCase__ = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(snake_case_ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(snake_case_ )
lowerCAmelCase__ = MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ ) )
lowerCAmelCase__ = Square(0.3 )
input.set_fill(snake_case_ ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,snake_case_ ,buff=0.5 )
self.play(Write(snake_case_ ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=snake_case_ ,buff=0.02 )
self.play(MoveToTarget(snake_case_ ) )
self.play(FadeOut(snake_case_ ) )
lowerCAmelCase__ = Arrow(start=snake_case_ ,end=snake_case_ ,color=snake_case_ ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,snake_case_ ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase__ = MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ ,run_time=3 ) )
lowerCAmelCase__ = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(snake_case_ ) ,Circumscribe(model_arr[0] ,color=snake_case_ ,**snake_case_ ) ,Circumscribe(model_cpu_arr[0] ,color=snake_case_ ,**snake_case_ ) ,Circumscribe(gpu_rect[0] ,color=snake_case_ ,**snake_case_ ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,snake_case_ ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase__ = AnimationGroup(
FadeOut(snake_case_ ,run_time=0.5 ) ,MoveToTarget(snake_case_ ,run_time=0.5 ) ,FadeIn(snake_case_ ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(snake_case_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**snake_case_ ) ,Circumscribe(cpu_left_col_base[i] ,**snake_case_ ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=snake_case_ ,**snake_case_ ) ,Circumscribe(gpu_rect[0] ,color=snake_case_ ,**snake_case_ ) ,Circumscribe(model_arr[i + 1] ,color=snake_case_ ,**snake_case_ ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=snake_case_ ,**snake_case_ ) ,Circumscribe(cpu_left_col_base[-1] ,color=snake_case_ ,**snake_case_ ) ,Circumscribe(gpu_rect[0] ,color=snake_case_ ,**snake_case_ ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase__ = a_c
lowerCAmelCase__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(snake_case_ ) ,FadeOut(snake_case_ ,run_time=0.5 ) ,)
lowerCAmelCase__ = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ ,run_time=3 ) ,MoveToTarget(snake_case_ ) )
self.wait()
| 193
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = (CMStochasticIterativeScheduler,)
lowerCamelCase_ = 1_0
def _UpperCAmelCase ( self : Any , **snake_case_ : Tuple ):
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**snake_case_ )
return config
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : List[str] = 10
A : Dict = self.get_scheduler_config()
A : Optional[int] = self.scheduler_classes[0](**snake_case_ )
scheduler.set_timesteps(snake_case_ )
A : List[str] = scheduler.timesteps[0]
A : Any = scheduler.timesteps[1]
A : int = self.dummy_sample
A : str = 0.1 * sample
A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case_ )
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**snake_case_ )
A : str = 1
scheduler.set_timesteps(snake_case_ )
A : Optional[int] = scheduler.timesteps
A : int = torch.manual_seed(0 )
A : Optional[Any] = self.dummy_model()
A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case_ ):
# 1. scale model input
A : Dict = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
A : List[Any] = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
A : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A : Union[str, Any] = pred_prev_sample
A : List[str] = torch.sum(torch.abs(snake_case_ ) )
A : int = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
A : Tuple = self.scheduler_classes[0]
A : Tuple = self.get_scheduler_config()
A : str = scheduler_class(**snake_case_ )
A : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
A : Optional[int] = scheduler.timesteps
A : Any = torch.manual_seed(0 )
A : Tuple = self.dummy_model()
A : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A : Tuple = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
A : str = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
A : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A : str = pred_prev_sample
A : str = torch.sum(torch.abs(snake_case_ ) )
A : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
A : Optional[int] = self.scheduler_classes[0]
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**snake_case_ )
A : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=snake_case_ )
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
A : List[str] = self.scheduler_classes[0]
A : Dict = self.get_scheduler_config()
A : Tuple = scheduler_class(**snake_case_ )
A : Any = [39, 30, 12, 1, 0]
A : List[Any] = len(snake_case_ )
with self.assertRaises(snake_case_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
A : List[Any] = self.scheduler_classes[0]
A : str = self.get_scheduler_config()
A : List[Any] = scheduler_class(**snake_case_ )
A : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=snake_case_ )
| 256
| 0
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
UpperCAmelCase_ = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class __SCREAMING_SNAKE_CASE ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ = " " ):
"""simple docstring"""
_snake_case : str = sentence_delimiter
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return list(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : Any = []
for sent_idx, sentence in enumerate(SCREAMING_SNAKE_CASE__ ):
chars.extend(self.process_string(SCREAMING_SNAKE_CASE__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(SCREAMING_SNAKE_CASE__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
UpperCAmelCase_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
UpperCAmelCase_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
UpperCAmelCase_ = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
UpperCAmelCase_ = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
UpperCAmelCase_ = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def __lowerCamelCase( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , truth_transform=SCREAMING_SNAKE_CASE__ , hypothesis_transform=SCREAMING_SNAKE_CASE__ , )["wer"]
_snake_case : Tuple = 0
_snake_case : List[Any] = 0
for prediction, reference in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case : Optional[int] = jiwer.compute_measures(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , truth_transform=SCREAMING_SNAKE_CASE__ , hypothesis_transform=SCREAMING_SNAKE_CASE__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 718
|
from collections.abc import Iterable
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar('''_T''')
class __SCREAMING_SNAKE_CASE ( Generic[_T] ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
"""simple docstring"""
_snake_case : list[_T] = list(iterable or [] )
_snake_case : list[_T] = []
def __len__( self ):
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__( self ):
"""simple docstring"""
return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
self._stacka.append(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Optional[int] = self._stacka.pop
_snake_case : Optional[int] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 519
| 0
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class snake_case_ ( __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = RoFormerTokenizer
snake_case__ = RoFormerTokenizerFast
snake_case__ = True
snake_case__ = True
def UpperCAmelCase__ (self: List[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ (self: Dict , **__UpperCAmelCase: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCAmelCase )
def UpperCAmelCase__ (self: Dict , **__UpperCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCAmelCase )
def UpperCAmelCase__ (self: List[Any] ) -> List[str]:
'''simple docstring'''
__a : Union[str, Any] = "永和服装饰品有限公司,今天天气非常好"
__a : Dict = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def UpperCAmelCase__ (self: List[Any] ) -> List[str]:
'''simple docstring'''
__a : str = self.get_tokenizer()
__a , __a : Tuple = self.get_chinese_input_output_texts()
__a : Union[str, Any] = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__a : Any = tokens + [tokenizer.unk_token]
__a : Dict = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def UpperCAmelCase__ (self: Optional[int] ) -> Any:
'''simple docstring'''
__a : List[Any] = self.get_rust_tokenizer()
__a , __a : Optional[int] = self.get_chinese_input_output_texts()
__a : Any = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__a : Dict = tokens + [tokenizer.unk_token]
__a : Optional[Any] = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def UpperCAmelCase__ (self: Optional[int] ) -> int:
'''simple docstring'''
pass
def UpperCAmelCase__ (self: Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase__ (self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
| 351
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class snake_case_ ( __UpperCamelCase ):
"""simple docstring"""
def __init__(self: int , **__UpperCAmelCase: Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(__UpperCAmelCase )
def UpperCAmelCase__ (self: Optional[int] , **__UpperCAmelCase: Optional[int] ) -> List[Any]:
'''simple docstring'''
__a : Any = {}
__a : Union[str, Any] = {}
__a : List[str] = {}
# preprocess args
if "points_per_batch" in kwargs:
__a : Dict = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
__a : Tuple = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
__a : Dict = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
__a : int = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
__a : Optional[int] = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
__a : int = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
__a : Any = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
__a : int = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
__a : Any = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
__a : int = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
__a : Optional[Any] = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
__a : Optional[Any] = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self: Union[str, Any] , __UpperCAmelCase: str , *__UpperCAmelCase: Optional[Any] , __UpperCAmelCase: str=None , __UpperCAmelCase: Union[str, Any]=None , **__UpperCAmelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
return super().__call__(__UpperCAmelCase , *__UpperCAmelCase , num_workers=__UpperCAmelCase , batch_size=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase__ (self: Optional[Any] , __UpperCAmelCase: List[str] , __UpperCAmelCase: int=64 , __UpperCAmelCase: int = 0 , __UpperCAmelCase: float = 512 / 1500 , __UpperCAmelCase: Optional[int] = 32 , __UpperCAmelCase: Optional[int] = 1 , ) -> str:
'''simple docstring'''
__a : Union[str, Any] = load_image(__UpperCAmelCase )
__a : List[str] = self.image_processor.size["longest_edge"]
__a , __a , __a , __a : int = self.image_processor.generate_crop_boxes(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__a : Union[str, Any] = self.image_processor(images=__UpperCAmelCase , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
__a : str = self.get_inference_context()
with inference_context():
__a : int = self._ensure_tensor_on_device(__UpperCAmelCase , device=self.device )
__a : Union[str, Any] = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
__a : Optional[Any] = image_embeddings
__a : Dict = grid_points.shape[1]
__a : Any = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , __UpperCAmelCase , __UpperCAmelCase ):
__a : List[str] = grid_points[:, i : i + points_per_batch, :, :]
__a : str = input_labels[:, i : i + points_per_batch]
__a : str = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCAmelCase__ (self: Any , __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: Any=0.88 , __UpperCAmelCase: Optional[int]=0.95 , __UpperCAmelCase: Optional[int]=0 , __UpperCAmelCase: int=1 , ) -> List[str]:
'''simple docstring'''
__a : List[Any] = model_inputs.pop("input_boxes" )
__a : Optional[Any] = model_inputs.pop("is_last" )
__a : Dict = model_inputs.pop("original_sizes" ).tolist()
__a : int = model_inputs.pop("reshaped_input_sizes" ).tolist()
__a : Optional[Any] = self.model(**__UpperCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__a : Union[str, Any] = model_outputs["pred_masks"]
__a : Optional[Any] = self.image_processor.post_process_masks(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , binarize=__UpperCAmelCase )
__a : Optional[Any] = model_outputs["iou_scores"]
__a , __a , __a : Dict = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCAmelCase__ (self: Any , __UpperCAmelCase: str , __UpperCAmelCase: List[str]=False , __UpperCAmelCase: int=False , __UpperCAmelCase: List[str]=0.7 , ) -> Dict:
'''simple docstring'''
__a : Dict = []
__a : Optional[Any] = []
__a : Optional[Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
__a : Any = torch.cat(__UpperCAmelCase )
__a : Union[str, Any] = torch.cat(__UpperCAmelCase )
__a , __a , __a , __a : Tuple = self.image_processor.post_process_for_mask_generation(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__a : Optional[int] = defaultdict(__UpperCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__UpperCAmelCase )
__a : int = {}
if output_rle_mask:
__a : Optional[int] = rle_mask
if output_bboxes_mask:
__a : List[str] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 351
| 1
|
from __future__ import annotations
def __UpperCamelCase ( A ):
if len(A ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
UpperCamelCase__ = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 469
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__magic_name__ =logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __UpperCamelCase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=768 ) -> Dict:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = proj_size
UpperCamelCase__ = CLIPVisionModel(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = PaintByExampleMapper(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = nn.LayerNorm(config.hidden_size )
UpperCamelCase__ = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCamelCase__ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.model(pixel_values=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = clip_output.pooler_output
UpperCamelCase__ = self.mapper(latent_states[:, None] )
UpperCamelCase__ = self.final_layer_norm(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.proj_out(SCREAMING_SNAKE_CASE_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _A ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
super().__init__()
UpperCamelCase__ = (config.num_hidden_layers + 1) // 5
UpperCamelCase__ = config.hidden_size
UpperCamelCase__ = 1
UpperCamelCase__ = nn.ModuleList(
[
BasicTransformerBlock(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , activation_fn='''gelu''' , attention_bias=SCREAMING_SNAKE_CASE_ )
for _ in range(SCREAMING_SNAKE_CASE_ )
] )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
for block in self.blocks:
UpperCamelCase__ = block(SCREAMING_SNAKE_CASE_ )
return hidden_states
| 469
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
_lowercase = logging.get_logger('transformers.models.encodec')
_lowercase = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
_lowercase = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
_lowercase = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
_lowercase = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
_lowercase = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
_lowercase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
_lowercase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
_lowercase = []
_lowercase = []
def __UpperCamelCase ( a : Any , a : List[Any] , a : List[Any] , a : List[str] , a : str ) ->Any:
for attribute in key.split('''.''' ):
snake_case = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
snake_case = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
elif weight_type == "running_mean":
snake_case = value
elif weight_type == "running_var":
snake_case = value
elif weight_type == "num_batches_tracked":
snake_case = value
elif weight_type == "weight_ih_l0":
snake_case = value
elif weight_type == "weight_hh_l0":
snake_case = value
elif weight_type == "bias_ih_l0":
snake_case = value
elif weight_type == "bias_hh_l0":
snake_case = value
elif weight_type == "weight_ih_l1":
snake_case = value
elif weight_type == "weight_hh_l1":
snake_case = value
elif weight_type == "bias_ih_l1":
snake_case = value
elif weight_type == "bias_hh_l1":
snake_case = value
else:
snake_case = value
logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def __UpperCamelCase ( a : Dict , a : str ) ->Union[str, Any]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case , snake_case = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __UpperCamelCase ( a : Tuple , a : Tuple , a : Union[str, Any] ) ->Any:
snake_case = []
if model_name == "encodec_24khz" or "encodec_32khz":
snake_case = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case = MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(__UpperCamelCase , __UpperCamelCase ):
logger.info(f"""{name} was ignored""" )
continue
snake_case = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case , snake_case = key.split('''.*.''' )
if prefix in name and suffix in name:
snake_case = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__UpperCamelCase )[0].split('''.''' )[-2]
snake_case = mapped_key.replace('''*''' , __UpperCamelCase )
if "weight_g" in name:
snake_case = '''weight_g'''
elif "weight_v" in name:
snake_case = '''weight_v'''
elif "weight_ih_l0" in name:
snake_case = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
snake_case = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
snake_case = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
snake_case = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
snake_case = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
snake_case = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
snake_case = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
snake_case = '''bias_hh_l1'''
elif "bias" in name:
snake_case = '''bias'''
elif "weight" in name:
snake_case = '''weight'''
elif "running_mean" in name:
snake_case = '''running_mean'''
elif "running_var" in name:
snake_case = '''running_var'''
elif "num_batches_tracked" in name:
snake_case = '''num_batches_tracked'''
else:
snake_case = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def __UpperCamelCase ( a : List[str] , a : int , a : Optional[int] , a : Dict=None , a : List[Any]=None , ) ->int:
if config_path is not None:
snake_case = EncodecConfig.from_pretrained(__UpperCamelCase )
else:
snake_case = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case = [8, 5, 4, 4]
snake_case = [2.2]
snake_case = 64
snake_case = 3_2000
snake_case = 2048
snake_case = False
snake_case = False
snake_case = False
elif model_name == "encodec_48khz":
snake_case = [8, 5, 4, 2]
snake_case = [3.0, 6.0, 12.0, 24.0]
snake_case = 4_8000
snake_case = 2
snake_case = False
snake_case = '''time_group_norm'''
snake_case = True
snake_case = 1.0
snake_case = 0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
snake_case = EncodecModel(__UpperCamelCase )
snake_case = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__UpperCamelCase )
snake_case = torch.load(__UpperCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case = original_checkpoint['''best_state''']
recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__UpperCamelCase )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_lowercase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 707
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]:
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = is_training
snake_case = use_labels
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case = (image_size // patch_size) ** 2
snake_case = num_patches + 1
def UpperCamelCase ( self ) -> int:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ) -> int:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
snake_case = TFViTModel(config=A__ )
snake_case = model(A__ , training=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
snake_case = self.image_size // 2
snake_case = pixel_values[:, :, :image_size, :image_size]
snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
snake_case = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]:
snake_case = self.type_sequence_label_size
snake_case = TFViTForImageClassification(A__ )
snake_case = model(A__ , labels=A__ , training=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
snake_case = self.image_size // 2
snake_case = pixel_values[:, :, :image_size, :image_size]
snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case = 1
snake_case = TFViTForImageClassification(A__ )
snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
_UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def UpperCamelCase ( self ) -> List[Any]:
snake_case = TFViTModelTester(self )
snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase ( self ) -> int:
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase ( self ) -> str:
pass
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )
def UpperCamelCase ( self ) -> List[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(A__ )
snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> Any:
snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(A__ )
def __UpperCamelCase ( ) ->Any:
snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self ) -> Optional[int]:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ) -> Dict:
snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=A__ , return_tensors='''tf''' )
# forward pass
snake_case = model(**A__ )
# verify the logits
snake_case = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , A__ )
snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
| 0
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_50_00_00 ) -> int:
__lowerCamelCase : defaultdict = defaultdict(UpperCAmelCase_ )
__lowerCamelCase : Any = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , UpperCAmelCase_ , 2 ):
if gcd(UpperCAmelCase_ , UpperCAmelCase_ ) > 1:
continue
__lowerCamelCase : Any = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(UpperCAmelCase_ , limit + 1 , UpperCAmelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13
|
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
A__ : Optional[Any] = tuple[int, int]
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
__lowerCamelCase : set[int] = vertices
__lowerCamelCase : dict[EdgeT, int] = {
(min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )): weight for edge, weight in edges.items()
}
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__lowerCamelCase : Union[str, Any] = weight
def lowercase_ ( self ) -> Graph:
__lowerCamelCase : Graph = Graph({min(self.vertices )} , {} )
__lowerCamelCase : EdgeT
__lowerCamelCase : int
__lowerCamelCase : EdgeT
__lowerCamelCase : int
while len(subgraph.vertices ) < len(self.vertices ):
__lowerCamelCase : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__lowerCamelCase : Optional[int] = edge
__lowerCamelCase : List[str] = weight
subgraph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return subgraph
def UpperCAmelCase__ ( UpperCAmelCase_ : str = "p107_network.txt" ) -> int:
__lowerCamelCase : str = os.path.abspath(os.path.dirname(UpperCAmelCase_ ) )
__lowerCamelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : dict[EdgeT, int] = {}
__lowerCamelCase : list[str]
__lowerCamelCase : int
__lowerCamelCase : int
with open(UpperCAmelCase_ ) as f:
__lowerCamelCase : Any = f.read().strip().split('\n' )
__lowerCamelCase : Any = [line.split(',' ) for line in data]
for edgea in range(1 , len(UpperCAmelCase_ ) ):
for edgea in range(UpperCAmelCase_ ):
if adjaceny_matrix[edgea][edgea] != "-":
__lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] )
__lowerCamelCase : Graph = Graph(set(range(len(UpperCAmelCase_ ) ) ) , UpperCAmelCase_ )
__lowerCamelCase : Graph = graph.prims_algorithm()
__lowerCamelCase : int = sum(graph.edges.values() )
__lowerCamelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
__lowerCAmelCase = {
'''yjernite/retribert-base-uncased''': 5_12,
}
__lowerCAmelCase = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class __a ( __UpperCamelCase ):
__lowercase : Tuple = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__lowercase : Optional[int] = RetriBertTokenizer
__lowercase : Tuple = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowercase__: Any = getattr(lowerCAmelCase__ , normalizer_state.pop('type' ) )
lowercase__: List[Any] = do_lower_case
lowercase__: Union[str, Any] = strip_accents
lowercase__: int = tokenize_chinese_chars
lowercase__: List[Any] = normalizer_class(**lowerCAmelCase__ )
lowercase__: Optional[Any] = do_lower_case
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[str]:
'''simple docstring'''
lowercase__: Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__: Optional[int] = [self.sep_token_id]
lowercase__: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
lowercase__: Any = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 709
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : Optional[int] = AlbertTokenizer
__lowercase : str = AlbertTokenizerFast
__lowercase : List[Any] = True
__lowercase : int = True
__lowercase : Any = True
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: Optional[int] = AlbertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: Dict = 'this is a test'
lowercase__: List[str] = 'this is a test'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Dict = '<pad>'
lowercase__: Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(lowerCAmelCase__ ) , 30_000 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase__: Optional[int] = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: str = 'I was born in 92000, and this is falsé.'
lowercase__: Any = tokenizer.tokenize(lowerCAmelCase__ )
lowercase__: Optional[Any] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
lowercase__: Tuple = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: int = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(lowerCAmelCase__ )
lowercase__: str = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Union[str, Any] = AlbertTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
lowercase__: Optional[int] = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCAmelCase__ , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [48, 25, 21, 1_289] )
lowercase__: Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCAmelCase__ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
lowercase__: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowercase__: str = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Optional[int] = AlbertTokenizer(lowerCAmelCase__ )
lowercase__: List[Any] = tokenizer.encode('sequence builders' )
lowercase__: Tuple = tokenizer.encode('multi-sequence build' )
lowercase__: str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
lowercase__: Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
# fmt: off
lowercase__: List[Any] = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 335
| 0
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCamelCase_ :
def __init__( self , lowerCamelCase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = data
_UpperCamelCase = [0x6745_2301, 0xefcd_ab89, 0x98ba_dcfe, 0x1032_5476, 0xc3d2_e1f0]
@staticmethod
def lowercase ( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0xffff_ffff
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
_UpperCamelCase = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
return padded_data
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def lowercase ( self , lowerCamelCase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = list(struct.unpack(">16L" , lowerCamelCase_ ) ) + [0] * 64
for i in range(16 , 80 ):
_UpperCamelCase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.padding()
_UpperCamelCase = self.split_blocks()
for block in self.blocks:
_UpperCamelCase = self.expand_block(lowerCamelCase_ )
_UpperCamelCase = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
_UpperCamelCase = (b & c) | ((~b) & d)
_UpperCamelCase = 0x5a82_7999
elif 20 <= i < 40:
_UpperCamelCase = b ^ c ^ d
_UpperCamelCase = 0x6ed9_eba1
elif 40 <= i < 60:
_UpperCamelCase = (b & c) | (b & d) | (c & d)
_UpperCamelCase = 0x8f1b_bcdc
elif 60 <= i < 80:
_UpperCamelCase = b ^ c ^ d
_UpperCamelCase = 0xca62_c1d6
_UpperCamelCase = (
self.rotate(lowerCamelCase_ , 5 ) + f + e + k + expanded_block[i] & 0xffff_ffff,
a,
self.rotate(lowerCamelCase_ , 30 ),
c,
d,
)
_UpperCamelCase = (
self.h[0] + a & 0xffff_ffff,
self.h[1] + b & 0xffff_ffff,
self.h[2] + c & 0xffff_ffff,
self.h[3] + d & 0xffff_ffff,
self.h[4] + e & 0xffff_ffff,
)
return ("{:08x}" * 5).format(*self.h )
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = B'''Test String'''
assert SHAaHash(a_ ).final_hash() == hashlib.shaa(a_ ).hexdigest() # noqa: S324
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
_UpperCamelCase = f.read()
else:
_UpperCamelCase = bytes(a_ , "utf-8" )
print(SHAaHash(a_ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 147
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __A ( a_ : int=None )-> Tuple:
'''simple docstring'''
if subparsers is not None:
SCREAMING_SNAKE_CASE : List[str] = subparsers.add_parser('''test''' )
else:
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=a_ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=a_ )
return parser
def __A ( a_ : Any )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
SCREAMING_SNAKE_CASE : Tuple = script_name
else:
SCREAMING_SNAKE_CASE : Optional[Any] = F"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE : str = ['''accelerate-launch'''] + test_args.split()
SCREAMING_SNAKE_CASE : List[str] = execute_subprocess_async(a_ , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __A ( )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = test_command_parser()
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
test_command(a_ )
if __name__ == "__main__":
main()
| 698
| 0
|
'''simple docstring'''
import math
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Union[str, Any] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(a_ )
def __UpperCAmelCase ( a_: float = 1 / 12_345 ):
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : str = 0
_UpperCAmelCase : List[str] = 3
while True:
_UpperCAmelCase : Union[str, Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(a_ ):
_UpperCAmelCase : str = int(a_ )
total_partitions += 1
if check_partition_perfect(a_ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(a_ )
integer += 1
if __name__ == "__main__":
print(f'{solution() = }')
| 257
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __UpperCAmelCase ( a_: List[Any], a_: Optional[Any], a_: int ):
_UpperCAmelCase : List[str] = UniSpeechSatForSequenceClassification.from_pretrained(a_, config=a_ )
_UpperCAmelCase : List[str] = downstream_dict["projector.weight"]
_UpperCAmelCase : List[Any] = downstream_dict["projector.bias"]
_UpperCAmelCase : Union[str, Any] = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase : Dict = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase ( a_: Optional[Any], a_: Union[str, Any], a_: str ):
_UpperCAmelCase : Optional[Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(a_, config=a_ )
_UpperCAmelCase : List[str] = downstream_dict["model.linear.weight"]
_UpperCAmelCase : Optional[int] = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase ( a_: Tuple, a_: List[str], a_: Any ):
_UpperCAmelCase : int = UniSpeechSatForXVector.from_pretrained(a_, config=a_ )
_UpperCAmelCase : List[Any] = downstream_dict["connector.weight"]
_UpperCAmelCase : Optional[int] = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase : str = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
_UpperCAmelCase : Union[str, Any] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
_UpperCAmelCase : Optional[int] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase : Any = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase : List[Any] = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple, a_: Any, a_: Optional[Any] ):
_UpperCAmelCase : Any = torch.load(a_, map_location="cpu" )
_UpperCAmelCase : Tuple = checkpoint["Downstream"]
_UpperCAmelCase : Dict = UniSpeechSatConfig.from_pretrained(a_ )
_UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained(
a_, return_attention_mask=a_, do_normalize=a_ )
_UpperCAmelCase : List[Any] = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase : Union[str, Any] = convert_classification(a_, a_, a_ )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase : Any = convert_diarization(a_, a_, a_ )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase : str = convert_xvector(a_, a_, a_ )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase : Union[str, Any] = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(a_ )
hf_model.save_pretrained(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__a = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 257
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
A_ : Union[str, Any] = TypeVar('_T')
class A_ ( Generic[_T] ):
'''simple docstring'''
def __init__(self , lowercase__ = None ) -> Optional[int]:
__UpperCAmelCase = list(iterable or [] )
__UpperCAmelCase = []
def __len__(self ) -> Dict:
return len(self._stacka ) + len(self._stacka )
def __repr__(self ) -> Any:
return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def lowerCAmelCase_ (self , lowercase__ ) -> Optional[Any]:
self._stacka.append(__a )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = self._stacka.pop
__UpperCAmelCase = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('''Queue is empty''' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 303
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
_A = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
_A = field(
default=UpperCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_A = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
_A = field(
default=UpperCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
_A = field(default=UpperCAmelCase_ , metadata={'help': 'Set this flag to use fast tokenization.'})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_A = field(
default=UpperCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
_A = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
_A = field(
default=UpperCAmelCase_ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
_A = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_A = field(
default=UpperCAmelCase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def __lowercase( ):
"""simple docstring"""
lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase , lowerCamelCase , lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase , lowerCamelCase , lowerCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
lowerCamelCase = import_module("tasks" )
try:
lowerCamelCase = getattr(UpperCAmelCase__ , model_args.task_type )
lowerCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowerCamelCase = token_classification_task.get_labels(data_args.labels )
lowerCamelCase = dict(enumerate(UpperCAmelCase__ ) )
lowerCamelCase = len(UpperCAmelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowerCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCamelCase = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCamelCase = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple[List[int], List[int]]:
lowerCamelCase = np.argmax(UpperCAmelCase__ , axis=2 )
lowerCamelCase , lowerCamelCase = preds.shape
lowerCamelCase = [[] for _ in range(UpperCAmelCase__ )]
lowerCamelCase = [[] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCAmelCase__ ) -> Dict:
lowerCamelCase , lowerCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
}
# Data collator
lowerCamelCase = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCamelCase = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCamelCase = trainer.evaluate()
lowerCamelCase = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
results.update(UpperCAmelCase__ )
# Predict
if training_args.do_predict:
lowerCamelCase = TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowerCamelCase , lowerCamelCase , lowerCamelCase = trainer.predict(UpperCAmelCase__ )
lowerCamelCase , lowerCamelCase = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
lowerCamelCase = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return results
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 623
| 0
|
def lowercase ( __A : int ) -> bool:
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
__lowercase : Union[str, Any] = int(input('''Enter number: ''').strip())
print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 315
|
def lowercase ( __A : Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = len(__A )
for i in range(length - 1 ):
snake_case : Dict = i
for k in range(i + 1 , __A ):
if collection[k] < collection[least]:
snake_case : Any = k
if least != i:
snake_case , snake_case : Any = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__lowercase : int = input('''Enter numbers separated by a comma:\n''').strip()
__lowercase : Optional[int] = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 315
| 1
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
lowercase__ = old_name
if "patch_embed" in old_name:
lowercase__ , lowercase__ , lowercase__ = old_name.split('.' )
if layer == "0":
lowercase__ = old_name.replace('0' , 'convolution1' )
elif layer == "1":
lowercase__ = old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
lowercase__ = old_name.replace('3' , 'convolution2' )
else:
lowercase__ = old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(R'\d\.\d' , _SCREAMING_SNAKE_CASE ):
lowercase__ = R'\b\d{2}\b'
if bool(re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
lowercase__ = re.search(R'\d\.\d\d.' , _SCREAMING_SNAKE_CASE ).group()
else:
lowercase__ = re.search(R'\d\.\d.' , _SCREAMING_SNAKE_CASE ).group()
if int(match[0] ) < 6:
lowercase__ = old_name.replace(_SCREAMING_SNAKE_CASE , '' )
lowercase__ = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
lowercase__ = 'intermediate_stages.' + trimmed_name
else:
lowercase__ = old_name.replace(_SCREAMING_SNAKE_CASE , '' )
if int(match[2] ) < num_meta4D_last_stage:
lowercase__ = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
lowercase__ = str(int(match[2] ) - num_meta4D_last_stage )
lowercase__ = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
lowercase__ = trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
lowercase__ = trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
lowercase__ = trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
lowercase__ = trimmed_name.replace('fc2' , 'linear_out' )
lowercase__ = 'last_stage.' + trimmed_name
elif "network" in old_name and re.search(R'.\d.' , _SCREAMING_SNAKE_CASE ):
lowercase__ = old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
lowercase__ = new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
lowercase__ = new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
lowercase__ = new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
lowercase__ = new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
lowercase__ = new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
lowercase__ = new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
lowercase__ = 'efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
lowercase__ = new_name.replace('norm' , 'layernorm' )
lowercase__ = 'efficientformer.' + new_name
else:
lowercase__ = 'efficientformer.encoder.' + new_name
return new_name
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for key in checkpoint.copy().keys():
lowercase__ = checkpoint.pop(_SCREAMING_SNAKE_CASE )
lowercase__ = val
return checkpoint
def __UpperCamelCase () -> Optional[int]:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowercase__ = EfficientFormerConfig.from_json_file(_SCREAMING_SNAKE_CASE )
lowercase__ = EfficientFormerForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE )
lowercase__ = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
lowercase__ = config.depths[-1] - config.num_metaad_blocks + 1
lowercase__ = convert_torch_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
lowercase__ = prepare_img()
lowercase__ = 256
lowercase__ = 224
lowercase__ = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
lowercase__ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
# original processing pipeline
lowercase__ = Compose(
[
Resize(_SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(_SCREAMING_SNAKE_CASE ),
ToTensor(),
Normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
] )
lowercase__ = image_transforms(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = model(_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
lowercase__ = (1, 1000)
if "l1" in model_name:
lowercase__ = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
lowercase__ = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
lowercase__ = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add model' , use_temp_dir=_SCREAMING_SNAKE_CASE , )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add image processor' , use_temp_dir=_SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
lowercase_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 235
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowercase_ = """
Human: <<task>>
Assistant: """
lowercase_ = """huggingface-tools/default-prompts"""
lowercase_ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
lowercase__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , _SCREAMING_SNAKE_CASE ) is not None:
return prompt_or_repo_id
lowercase__ = cached_file(
_SCREAMING_SNAKE_CASE , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
return f.read()
| 235
| 1
|
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
a : List[str] = 1
@register_to_config
def __init__( self , UpperCamelCase=2000 , UpperCamelCase=0.1 , UpperCamelCase=20 , UpperCamelCase=1E-3 ) -> Any:
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Optional[int]:
__lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCamelCase , device=UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ) -> Tuple:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowerCAmelCase = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowerCAmelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowerCAmelCase = std.unsqueeze(-1 )
__lowerCAmelCase = -score / std
# compute
__lowerCAmelCase = -1.0 / len(self.timesteps )
__lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowerCAmelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowerCAmelCase = beta_t.unsqueeze(-1 )
__lowerCAmelCase = -0.5 * beta_t * x
__lowerCAmelCase = torch.sqrt(UpperCamelCase )
__lowerCAmelCase = drift - diffusion**2 * score
__lowerCAmelCase = x + drift * dt
# add noise
__lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCamelCase , device=x.device , dtype=x.dtype )
__lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
| 714
|
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(lowerCamelCase ) / len(lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
| 0
|
'''simple docstring'''
import os
from math import logaa
def __magic_name__ ( __UpperCAmelCase = "base_exp.txt" ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__UpperCAmelCase ) , __UpperCAmelCase ) ) ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = list(map(__UpperCAmelCase , line.split(""",""" ) ) )
if x * logaa(__UpperCAmelCase ) > largest:
__SCREAMING_SNAKE_CASE = x * logaa(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = i + 1
return result
if __name__ == "__main__":
print(solution())
| 109
|
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
# TODO Update this
a = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __a ( _snake_case ):
__UpperCamelCase : str = 'esm'
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : str=None ,lowerCamelCase : Any=None ,lowerCamelCase : Union[str, Any]=768 ,lowerCamelCase : Tuple=12 ,lowerCamelCase : int=12 ,lowerCamelCase : Optional[int]=3072 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Optional[int]=0.1 ,lowerCamelCase : Any=1026 ,lowerCamelCase : str=0.02 ,lowerCamelCase : int=1E-1_2 ,lowerCamelCase : Union[str, Any]="absolute" ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : str=None ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : int=False ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Any=None ,**lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,mask_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = emb_layer_norm_before
__SCREAMING_SNAKE_CASE = token_dropout
__SCREAMING_SNAKE_CASE = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__SCREAMING_SNAKE_CASE = EsmFoldConfig()
elif isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = EsmFoldConfig(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__SCREAMING_SNAKE_CASE = get_default_vocab_list()
else:
__SCREAMING_SNAKE_CASE = vocab_list
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"""use_esm_attn_map""" ,lowerCamelCase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().to_dict()
if isinstance(self.esmfold_config ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = self.esmfold_config.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : str = None
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : float = 0
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : int = 128
__UpperCamelCase : "TrunkConfig" = None
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
if self.trunk is None:
__SCREAMING_SNAKE_CASE = TrunkConfig()
elif isinstance(self.trunk ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = asdict(self )
__SCREAMING_SNAKE_CASE = self.trunk.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : int = 48
__UpperCamelCase : int = 1024
__UpperCamelCase : int = 128
__UpperCamelCase : int = 32
__UpperCamelCase : int = 32
__UpperCamelCase : int = 32
__UpperCamelCase : float = 0
__UpperCamelCase : float = 0
__UpperCamelCase : bool = False
__UpperCamelCase : int = 4
__UpperCamelCase : Optional[int] = 128
__UpperCamelCase : "StructureModuleConfig" = None
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
if self.structure_module is None:
__SCREAMING_SNAKE_CASE = StructureModuleConfig()
elif isinstance(self.structure_module ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
__SCREAMING_SNAKE_CASE = self.sequence_state_dim // self.sequence_head_width
__SCREAMING_SNAKE_CASE = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = asdict(self )
__SCREAMING_SNAKE_CASE = self.structure_module.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : int = 384
__UpperCamelCase : int = 128
__UpperCamelCase : int = 16
__UpperCamelCase : int = 128
__UpperCamelCase : int = 12
__UpperCamelCase : int = 4
__UpperCamelCase : int = 8
__UpperCamelCase : float = 0.1
__UpperCamelCase : int = 8
__UpperCamelCase : int = 1
__UpperCamelCase : int = 2
__UpperCamelCase : int = 7
__UpperCamelCase : int = 10
__UpperCamelCase : float = 1E-8
__UpperCamelCase : float = 1E5
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return asdict(self )
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 109
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 30
|
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
| 30
| 1
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
if args.model_type == "bert":
_SCREAMING_SNAKE_CASE : Any = BertForMaskedLM.from_pretrained(args.model_name)
_SCREAMING_SNAKE_CASE : str = '''bert'''
else:
raise ValueError('''args.model_type should be \"bert\".''')
_SCREAMING_SNAKE_CASE : Optional[int] = model.state_dict()
_SCREAMING_SNAKE_CASE : str = {}
for w in ["word_embeddings", "position_embeddings"]:
_SCREAMING_SNAKE_CASE : Optional[int] = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE : Tuple = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
_SCREAMING_SNAKE_CASE : str = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
_SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
_SCREAMING_SNAKE_CASE : List[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
_SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
_SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
_SCREAMING_SNAKE_CASE : Any = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
_SCREAMING_SNAKE_CASE : Tuple = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
_SCREAMING_SNAKE_CASE : Tuple = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
_SCREAMING_SNAKE_CASE : Tuple = state_dict['''cls.predictions.decoder.weight''']
_SCREAMING_SNAKE_CASE : Tuple = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE : int = state_dict[F"cls.predictions.transform.dense.{w}"]
_SCREAMING_SNAKE_CASE : Dict = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
| 493
|
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Optional[Any] = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
__UpperCAmelCase : str = [2, 4, 6, 8, 1_0, 1_2]
__UpperCAmelCase : List[Any] = 1_0_0
self.assertEqual(kp.calc_profit(lowercase__ , lowercase__ , lowercase__) , 2_1_0)
def A( self):
self.assertRaisesRegex(lowercase__ , '''max_weight must greater than zero.''')
def A( self):
self.assertRaisesRegex(lowercase__ , '''Weight can not be negative.''')
def A( self):
self.assertRaisesRegex(lowercase__ , '''Profit can not be negative.''')
def A( self):
self.assertRaisesRegex(lowercase__ , '''max_weight must greater than zero.''')
def A( self):
self.assertRaisesRegex(
lowercase__ , '''The length of profit and weight must be same.''')
if __name__ == "__main__":
unittest.main()
| 462
| 0
|
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class snake_case_:
def __init__( self : int , UpperCamelCase_ : str = "cpu" , UpperCamelCase_ : str = "openai/clip-vit-large-patch14" ):
lowerCAmelCase : int = device
lowerCAmelCase : Optional[int] = CLIPTokenizerFast.from_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = [0.48_145_466, 0.4_578_275, 0.40_821_073]
lowerCAmelCase : List[str] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
lowerCAmelCase : Dict = torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowerCAmelCase : List[Any] = torchvision.transforms.Resize(2_2_4 )
lowerCAmelCase : str = torchvision.transforms.CenterCrop(2_2_4 )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Optional[Any] = self.resize(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = self.center_crop(UpperCamelCase_ )
lowerCAmelCase : str = self.normalize(UpperCamelCase_ )
return images
def __call__( self : str , UpperCamelCase_ : Dict=None , UpperCamelCase_ : List[str]=None , **UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : Union[str, Any] = self.tokenizer(text=UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = self.preprocess_img(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class snake_case_( nn.Module ):
def __init__( self : str , UpperCamelCase_ : str=1_0 , UpperCamelCase_ : int=0.01 , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : int=None , UpperCamelCase_ : int=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : str=True , UpperCamelCase_ : Any="image" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=False , ):
super().__init__()
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Optional[Any] = device if device else get_device()
if vqgan:
lowerCAmelCase : Optional[int] = vqgan
else:
lowerCAmelCase : int = load_vqgan(self.device , conf_path=UpperCamelCase_ , ckpt_path=UpperCamelCase_ )
self.vqgan.eval()
if clip:
lowerCAmelCase : List[str] = clip
else:
lowerCAmelCase : Dict = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
lowerCAmelCase : Tuple = ProcessorGradientFlow(device=self.device )
lowerCAmelCase : int = iterations
lowerCAmelCase : Optional[Any] = lr
lowerCAmelCase : Dict = log
lowerCAmelCase : List[Any] = make_grid
lowerCAmelCase : Dict = return_val
lowerCAmelCase : Tuple = quantize
lowerCAmelCase : Union[str, Any] = self.vqgan.decoder.z_shape
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Any=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Tuple=5 , UpperCamelCase_ : List[Any]=True ):
lowerCAmelCase : str = []
if output_path is None:
lowerCAmelCase : List[Any] = '''./animation.gif'''
if input_path is None:
lowerCAmelCase : int = self.save_path
lowerCAmelCase : Tuple = sorted(glob(input_path + '''/*''' ) )
if not len(UpperCamelCase_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(UpperCamelCase_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
lowerCAmelCase : int = total_duration / len(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = [frame_duration] * len(UpperCamelCase_ )
if extend_frames:
lowerCAmelCase : Union[str, Any] = 1.5
lowerCAmelCase : str = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(UpperCamelCase_ ) )
imageio.mimsave(UpperCamelCase_ , UpperCamelCase_ , duration=UpperCamelCase_ )
print(F'''gif saved to {output_path}''' )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None ):
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
lowerCAmelCase : Dict = preprocess(Image.open(UpperCamelCase_ ) , target_image_size=2_5_6 ).to(self.device )
lowerCAmelCase : str = preprocess_vqgan(UpperCamelCase_ )
lowerCAmelCase : Dict = self.vqgan.encode(UpperCamelCase_ )
return z
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple ):
lowerCAmelCase : List[str] = self.latent.detach().requires_grad_()
lowerCAmelCase : Optional[Any] = base_latent + transform_vector
if self.quantize:
lowerCAmelCase : str = self.vqgan.quantize(UpperCamelCase_ )
else:
lowerCAmelCase : int = trans_latent
return self.vqgan.decode(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict=None ):
lowerCAmelCase : Dict = self.clip_preprocessor(text=UpperCamelCase_ , images=UpperCamelCase_ , return_tensors='''pt''' , padding=UpperCamelCase_ )
lowerCAmelCase : List[str] = self.clip(**UpperCamelCase_ )
lowerCAmelCase : Tuple = clip_outputs.logits_per_image
if weights is not None:
lowerCAmelCase : Tuple = similarity_logits * weights
return similarity_logits.sum()
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Union[str, Any] = self._get_clip_similarity(pos_prompts['''prompts'''] , UpperCamelCase_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
lowerCAmelCase : Optional[int] = self._get_clip_similarity(neg_prompts['''prompts'''] , UpperCamelCase_ , weights=neg_prompts['''weights'''] )
else:
lowerCAmelCase : List[Any] = torch.tensor([1] , device=self.device )
lowerCAmelCase : Tuple = -torch.log(UpperCamelCase_ ) + torch.log(UpperCamelCase_ )
return loss
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : List[Any] = torch.randn_like(self.latent , requires_grad=UpperCamelCase_ , device=self.device )
lowerCAmelCase : List[Any] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowerCAmelCase : str = self._add_vector(UpperCamelCase_ )
lowerCAmelCase : int = loop_post_process(UpperCamelCase_ )
lowerCAmelCase : List[Any] = self._get_CLIP_loss(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
print('''CLIP loss''' , UpperCamelCase_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=UpperCamelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ):
wandb.init(reinit=UpperCamelCase_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
lowerCAmelCase : Optional[Any] = Image.open(UpperCamelCase_ )
lowerCAmelCase : str = image.resize((2_5_6, 2_5_6) )
wandb.log('''Original Image''' , wandb.Image(UpperCamelCase_ ) )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ):
if not prompts:
return []
lowerCAmelCase : Tuple = []
lowerCAmelCase : Optional[Any] = []
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(UpperCamelCase_ , (tuple, list) ):
lowerCAmelCase : Optional[int] = prompt[0]
lowerCAmelCase : Tuple = float(prompt[1] )
elif ":" in prompt:
lowerCAmelCase : Dict = prompt.split(''':''' )
lowerCAmelCase : List[str] = float(UpperCamelCase_ )
else:
lowerCAmelCase : List[str] = prompt
lowerCAmelCase : List[str] = 1.0
processed_prompts.append(UpperCamelCase_ )
weights.append(UpperCamelCase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCamelCase_ , device=self.device ),
}
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Any=None , ):
if image_path:
lowerCAmelCase : Optional[int] = self._get_latent(UpperCamelCase_ )
else:
lowerCAmelCase : str = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
assert pos_prompts, "You must provide at least one positive prompt."
lowerCAmelCase : str = self.process_prompts(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = self.process_prompts(UpperCamelCase_ )
if save_final and save_path is None:
lowerCAmelCase : Tuple = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(UpperCamelCase_ ):
os.makedirs(UpperCamelCase_ )
else:
lowerCAmelCase : List[str] = save_path + '''_''' + get_timestamp()
os.makedirs(UpperCamelCase_ )
lowerCAmelCase : Any = save_path
lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(UpperCamelCase_ ) )
lowerCAmelCase : str = loop_post_process(UpperCamelCase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ):
if show_intermediate:
show_pil(UpperCamelCase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(UpperCamelCase_ )} )
if show_final:
show_pil(UpperCamelCase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
| 700
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ : int = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _snake_case ( _snake_case : list[list[int]] ):
lowerCAmelCase : Union[str, Any] = []
for i in range(len(_snake_case ) ):
lowerCAmelCase : Any = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowerCAmelCase : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(_snake_case ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(_snake_case ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowerCAmelCase : str = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(_snake_case )
return next_generation
def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ):
lowerCAmelCase : int = []
for _ in range(_snake_case ):
# Create output image
lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) )
lowerCAmelCase : Union[str, Any] = img.load()
# Save cells to image
for x in range(len(_snake_case ) ):
for y in range(len(cells[0] ) ):
lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255
lowerCAmelCase : List[Any] = (colour, colour, colour)
# Save image
images.append(_snake_case )
lowerCAmelCase : Union[str, Any] = new_generation(_snake_case )
return images
if __name__ == "__main__":
snake_case__ : Union[str, Any] = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 637
| 0
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = """new-model"""
if is_tf_available():
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = NewModelConfig
@require_tf
class __lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = """bert-base-cased"""
snake_case_ : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : List[Any] = TFAutoModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase (self ):
snake_case_ : List[str] = """bert-base-cased"""
snake_case_ : int = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase (self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Any = TFAutoModelForCausalLM.from_pretrained(lowercase__ )
snake_case_ , snake_case_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , output_loading_info=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase (self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase (self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ )
snake_case_ , snake_case_ : str = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , output_loading_info=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase (self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Tuple = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : int = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ )
snake_case_ , snake_case_ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , output_loading_info=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase (self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ : Dict = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase (self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ : Tuple = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
@require_tensorflow_probability
def __UpperCamelCase (self ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase__ )
snake_case_ , snake_case_ : Any = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase__ , output_loading_info=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 1_44_10 )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = TFAutoModelWithLMHead.from_pretrained(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 1_44_10 )
def __UpperCamelCase (self ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
snake_case_ : Union[str, Any] = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : int = copy.deepcopy(model.config )
snake_case_ : Union[str, Any] = ["""FunnelBaseModel"""]
snake_case_ : Tuple = TFAutoModel.from_config(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase__ )
snake_case_ : Dict = TFAutoModel.from_pretrained(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
try:
AutoConfig.register("""new-model""" , lowercase__ )
snake_case_ : Optional[int] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowercase__ ):
auto_class.register(lowercase__ , lowercase__ )
auto_class.register(lowercase__ , lowercase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase__ ):
auto_class.register(lowercase__ , lowercase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case_ : Dict = BertModelTester(self ).get_config()
snake_case_ : Dict = NewModelConfig(**tiny_config.to_dict() )
snake_case_ : Optional[int] = auto_class.from_config(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase__ )
snake_case_ : List[Any] = auto_class.from_pretrained(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __UpperCamelCase (self ):
with self.assertRaisesRegex(
lowercase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
snake_case_ : Dict = TFAutoModel.from_pretrained("""bert-base""" )
def __UpperCamelCase (self ):
with self.assertRaisesRegex(
lowercase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case_ : List[str] = TFAutoModel.from_pretrained(lowercase__ , revision="""aaaaaa""" )
def __UpperCamelCase (self ):
with self.assertRaisesRegex(
lowercase__ , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
snake_case_ : int = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def __UpperCamelCase (self ):
with self.assertRaisesRegex(lowercase__ , """Use `from_pt=True` to load this model""" ):
snake_case_ : int = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def __UpperCamelCase (self ):
# Make sure we have cached the model.
snake_case_ : Optional[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
snake_case_ : List[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
snake_case_ : List[Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
snake_case_ : Union[str, Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 480
|
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@require_torch
def __UpperCamelCase (self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
snake_case_ : int = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
snake_case_ : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
snake_case_ : Dict = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
snake_case_ : Tuple = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowercase__ )
BertModel.from_pretrained(lowercase__ )
BertTokenizer.from_pretrained(lowercase__ )
pipeline(task="""fill-mask""" , model=lowercase__ )
# baseline - just load from_pretrained with normal network
snake_case_ : Optional[Any] = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
snake_case_ : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case_ : str = """1"""
snake_case_ : List[Any] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __UpperCamelCase (self ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
snake_case_ : List[str] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
snake_case_ : int = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
snake_case_ : Optional[int] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
snake_case_ : Optional[int] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowercase__ )
BertModel.from_pretrained(lowercase__ )
BertTokenizer.from_pretrained(lowercase__ )
pipeline(task="""fill-mask""" , model=lowercase__ )
# baseline - just load from_pretrained with normal network
snake_case_ : int = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
snake_case_ : List[Any] = self.get_env()
snake_case_ : Dict = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __UpperCamelCase (self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
snake_case_ : Optional[int] = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
snake_case_ : Dict = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
snake_case_ : int = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
snake_case_ : List[Any] = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
snake_case_ : List[str] = self.get_env()
snake_case_ : str = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
snake_case_ : Any = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case_ : Optional[Any] = """1"""
snake_case_ : int = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __UpperCamelCase (self ):
snake_case_ : str = """
from transformers import pipeline
"""
snake_case_ : Dict = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
snake_case_ : Dict = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
snake_case_ : List[str] = self.get_env()
snake_case_ : Dict = """1"""
snake_case_ : int = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
snake_case_ : Optional[int] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def __UpperCamelCase (self ):
snake_case_ : int = """
from transformers import AutoModel
"""
snake_case_ : Optional[int] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
snake_case_ : Dict = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
snake_case_ : Optional[Any] = self.get_env()
snake_case_ : List[str] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case_ : Any = """1"""
snake_case_ : Dict = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 480
| 1
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase_ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Dict:
'''simple docstring'''
super().__init__()
A__ : List[str] =nn.ModuleList(__lowerCamelCase )
def lowercase__ ( self : int , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Union[torch.Tensor, float, int] , lowerCAmelCase_ : torch.Tensor , lowerCAmelCase_ : List[torch.tensor] , lowerCAmelCase_ : List[float] , lowerCAmelCase_ : Optional[torch.Tensor] = None , lowerCAmelCase_ : Optional[torch.Tensor] = None , lowerCAmelCase_ : Optional[torch.Tensor] = None , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , ) -> List[Any]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(__lowerCamelCase , __lowerCamelCase , self.nets ) ):
A__ : int =controlnet(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
# merge samples
if i == 0:
A__ : Union[str, Any] =down_samples, mid_sample
else:
A__ : Tuple =[
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__lowerCamelCase , __lowerCamelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, os.PathLike] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Callable = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[str] = None , ) -> Dict:
'''simple docstring'''
A__ : Any =0
A__ : Tuple =save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__lowerCamelCase , is_main_process=__lowerCamelCase , save_function=__lowerCamelCase , safe_serialization=__lowerCamelCase , variant=__lowerCamelCase , )
idx += 1
A__ : Optional[Any] =model_path_to_save + f"_{idx}"
@classmethod
def lowercase__ ( cls : Dict , lowerCAmelCase_ : Optional[Union[str, os.PathLike]] , **lowerCAmelCase_ : Optional[int] ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =0
A__ : List[str] =[]
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
A__ : int =pretrained_model_path
while os.path.isdir(__lowerCamelCase ):
A__ : int =ControlNetModel.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
controlnets.append(__lowerCamelCase )
idx += 1
A__ : int =pretrained_model_path + f"_{idx}"
logger.info(f"{len(__lowerCamelCase )} controlnets loaded from {pretrained_model_path}." )
if len(__lowerCamelCase ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(__lowerCamelCase )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(__lowerCamelCase )
| 705
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case : List[str] = 5_0003
__snake_case : Dict = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PLBartTokenizer
__snake_case = None
__snake_case = False
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Tuple =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =PLBartTokenizer(lowerCAmelCase_ , language_codes="""base""" , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Tuple =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Any =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : str =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Optional[Any] =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A__ : Dict ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : int =PLBartTokenizer(lowerCAmelCase_ , language_codes="""multi""" , keep_accents=lowerCAmelCase_ )
A__ : Dict =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Dict =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : str =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : Dict =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A__ : Tuple =tokenizer.vocab_size
A__ : Dict =[tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A__ : Any ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A__ : int =tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'uclanlp/plbart-python-en_XX'
__snake_case = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__snake_case = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__snake_case = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowercase__ ( cls : Optional[int] ) -> str:
'''simple docstring'''
A__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A__ : Optional[Any] =1
return cls
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
A__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ : Any =self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
A__ : Optional[int] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[int] =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
A__ : str =10
A__ : Optional[Any] =self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ : Tuple =tempfile.mkdtemp()
A__ : Tuple =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
A__ : Optional[Any] =PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
A__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
A__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : Any =self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
A__ : Optional[int] =self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
A__ : Optional[Any] =targets["""input_ids"""]
A__ : List[str] =shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
| 687
| 0
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowerCamelCase_ = logging.getLogger(__name__)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_=-1 ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = label_idx
def _lowercase ( self , lowercase_ , lowercase_ ) -> List[InputExample]:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = mode.value
lowerCAmelCase_ = os.path.join(lowercase_ , f'''{mode}.txt''' )
lowerCAmelCase_ = 1
lowerCAmelCase_ = []
with open(lowercase_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase_ , labels=lowercase_ ) )
guid_index += 1
lowerCAmelCase_ = []
lowerCAmelCase_ = []
else:
lowerCAmelCase_ = line.split(' ' )
words.append(splits[0] )
if len(lowercase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase_ , labels=lowercase_ ) )
return examples
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowercase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase_ = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowercase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase_ , 'r' ) as f:
lowerCAmelCase_ = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class a_ ( a_ ):
'''simple docstring'''
def __init__( self ) -> Dict:
'''simple docstring'''
super().__init__(label_idx=-2 )
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase_ , 'r' ) as f:
lowerCAmelCase_ = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class a_ ( a_ ):
'''simple docstring'''
def _lowercase ( self , lowercase_ , lowercase_ ) -> List[InputExample]:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = mode.value
lowerCAmelCase_ = os.path.join(lowercase_ , f'''{mode}.txt''' )
lowerCAmelCase_ = 1
lowerCAmelCase_ = []
with open(lowercase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowercase_ ):
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowercase_ ) == len(lowercase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase_ , labels=lowercase_ ) )
guid_index += 1
return examples
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = 0
for sentence in parse_incr(lowercase_ ):
lowerCAmelCase_ = preds_list[example_id]
lowerCAmelCase_ = ''
for token in sentence:
out += f'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowercase_ )
example_id += 1
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 318
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class a_ ( a_ , a_ ):
'''simple docstring'''
__a: Union[str, Any] = '''resnet'''
__a: List[Any] = ['''basic''', '''bottleneck''']
def __init__( self , lowercase_=3 , lowercase_=6_4 , lowercase_=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , lowercase_=[3, 4, 6, 3] , lowercase_="bottleneck" , lowercase_="relu" , lowercase_=False , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowercase_ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embedding_size
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = depths
lowerCAmelCase_ = layer_type
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = downsample_in_first_stage
lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
class a_ ( a_ ):
'''simple docstring'''
__a: Optional[int] = version.parse('''1.11''' )
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowercase ( self ) -> float:
'''simple docstring'''
return 1e-3
| 318
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowercase ( _UpperCAmelCase ):
"""simple docstring"""
snake_case_ = 'sew-d'
def __init__( self : List[str] , a_ : Dict=32 , a_ : List[Any]=7_68 , a_ : Dict=12 , a_ : Dict=12 , a_ : Optional[Any]=30_72 , a_ : str=2 , a_ : str=5_12 , a_ : Union[str, Any]=2_56 , a_ : Any=True , a_ : List[str]=True , a_ : Union[str, Any]=("p2c", "c2p") , a_ : List[str]="layer_norm" , a_ : Optional[int]="gelu_python" , a_ : List[Any]=0.1 , a_ : Any=0.1 , a_ : List[Any]=0.1 , a_ : Union[str, Any]=0.0 , a_ : str=0.1 , a_ : Optional[Any]=0.0_2 , a_ : str=1e-7 , a_ : Optional[int]=1e-5 , a_ : List[Any]="group" , a_ : List[Any]="gelu" , a_ : int=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , a_ : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a_ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a_ : Dict=False , a_ : List[str]=1_28 , a_ : List[str]=16 , a_ : Optional[Any]=True , a_ : Optional[Any]=0.0_5 , a_ : List[str]=10 , a_ : str=2 , a_ : Tuple=0.0 , a_ : List[str]=10 , a_ : Tuple=0 , a_ : Optional[int]="mean" , a_ : List[Any]=False , a_ : List[str]=False , a_ : Tuple=2_56 , a_ : List[str]=0 , a_ : str=1 , a_ : List[Any]=2 , **a_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = feat_extract_norm
lowerCamelCase__ = feat_extract_activation
lowerCamelCase__ = list(__UpperCamelCase )
lowerCamelCase__ = list(__UpperCamelCase )
lowerCamelCase__ = list(__UpperCamelCase )
lowerCamelCase__ = conv_bias
lowerCamelCase__ = num_conv_pos_embeddings
lowerCamelCase__ = num_conv_pos_embedding_groups
lowerCamelCase__ = len(self.conv_dim )
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = squeeze_factor
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = position_buckets
lowerCamelCase__ = share_att_key
lowerCamelCase__ = relative_attention
lowerCamelCase__ = norm_rel_ebd
lowerCamelCase__ = list(__UpperCamelCase )
lowerCamelCase__ = hidden_act
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = feat_proj_dropout
lowerCamelCase__ = final_dropout
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = feature_layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__ = apply_spec_augment
lowerCamelCase__ = mask_time_prob
lowerCamelCase__ = mask_time_length
lowerCamelCase__ = mask_time_min_masks
lowerCamelCase__ = mask_feature_prob
lowerCamelCase__ = mask_feature_length
lowerCamelCase__ = mask_feature_min_masks
# ctc loss
lowerCamelCase__ = ctc_loss_reduction
lowerCamelCase__ = ctc_zero_infinity
# sequence classification
lowerCamelCase__ = use_weighted_layer_sum
lowerCamelCase__ = classifier_proj_size
@property
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 719
|
def snake_case (UpperCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 235
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a ( ) ->str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
SCREAMING_SNAKE_CASE = Image.open(requests.get(a , stream=a ).raw ).convert('''RGB''' )
return image
def a ( a ) ->Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def a ( a , a , a ) ->Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dct.pop(a )
SCREAMING_SNAKE_CASE = val
def a ( a , a ) ->Optional[Any]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
SCREAMING_SNAKE_CASE = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
SCREAMING_SNAKE_CASE = torch.cat((q_bias, torch.zeros_like(a , requires_grad=a ), v_bias) )
SCREAMING_SNAKE_CASE = qkv_bias
def a ( a ) ->Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 364 if '''coco''' in model_name else 224
SCREAMING_SNAKE_CASE = InstructBlipVisionConfig(image_size=a ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
SCREAMING_SNAKE_CASE = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
SCREAMING_SNAKE_CASE = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
SCREAMING_SNAKE_CASE = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
SCREAMING_SNAKE_CASE = InstructBlipConfig(vision_config=a , text_config=a , qformer_config=a )
return config, image_size
@torch.no_grad()
def a ( a , a=None , a=False ) ->Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
SCREAMING_SNAKE_CASE = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
SCREAMING_SNAKE_CASE = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_blipa_config(a )
SCREAMING_SNAKE_CASE = InstructBlipForConditionalGeneration(a ).eval()
SCREAMING_SNAKE_CASE = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
SCREAMING_SNAKE_CASE = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
SCREAMING_SNAKE_CASE = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = load_model_and_preprocess(
name=a , model_type=a , is_eval=a , device=a )
original_model.eval()
print('''Done!''' )
# update state dict keys
SCREAMING_SNAKE_CASE = original_model.state_dict()
SCREAMING_SNAKE_CASE = create_rename_keys(a )
for src, dest in rename_keys:
rename_key(a , a , a )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
SCREAMING_SNAKE_CASE = state_dict.pop(a )
if key.startswith('''Qformer.bert''' ):
SCREAMING_SNAKE_CASE = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
SCREAMING_SNAKE_CASE = key.replace('''self''' , '''attention''' )
if "llm_proj" in key:
SCREAMING_SNAKE_CASE = key.replace('''llm_proj''' , '''language_projection''' )
if "t5_proj" in key:
SCREAMING_SNAKE_CASE = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''llm_model''' ):
SCREAMING_SNAKE_CASE = key.replace('''llm_model''' , '''language_model''' )
if key.startswith('''t5''' ):
SCREAMING_SNAKE_CASE = key.replace('''t5''' , '''language''' )
SCREAMING_SNAKE_CASE = val
# read in qv biases
read_in_q_v_bias(a , a )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(a , strict=a )
SCREAMING_SNAKE_CASE = load_demo_image()
SCREAMING_SNAKE_CASE = '''What is unusual about this image?'''
# create processor
SCREAMING_SNAKE_CASE = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=a , image_std=a )
SCREAMING_SNAKE_CASE = InstructBlipProcessor(
image_processor=a , tokenizer=a , qformer_tokenizer=a , )
SCREAMING_SNAKE_CASE = processor(images=a , text=a , return_tensors='''pt''' ).to(a )
# make sure processor creates exact same pixel values
SCREAMING_SNAKE_CASE = vis_processors['''eval'''](a ).unsqueeze(0 ).to(a )
SCREAMING_SNAKE_CASE = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , a )
original_model.to(a )
hf_model.to(a )
with torch.no_grad():
if "vicuna" in model_name:
SCREAMING_SNAKE_CASE = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
SCREAMING_SNAKE_CASE = hf_model(**a ).logits
else:
SCREAMING_SNAKE_CASE = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
SCREAMING_SNAKE_CASE = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(a )
SCREAMING_SNAKE_CASE = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
SCREAMING_SNAKE_CASE = hf_model(**a , labels=a ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
SCREAMING_SNAKE_CASE = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , a , atol=a )
print('''Looks ok!''' )
print('''Generating with original model...''' )
SCREAMING_SNAKE_CASE = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
SCREAMING_SNAKE_CASE = hf_model.generate(
**a , do_sample=a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
SCREAMING_SNAKE_CASE = 2
print('''Original generation:''' , a )
SCREAMING_SNAKE_CASE = processor.batch_decode(a , skip_special_tokens=a )
SCREAMING_SNAKE_CASE = [text.strip() for text in output_text]
print('''HF generation:''' , a )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a )
hf_model.save_pretrained(a )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
__lowerCAmelCase = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__lowerCAmelCase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 201
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__lowerCAmelCase = 4
__lowerCAmelCase = 3
class lowerCamelCase ( __lowerCamelCase ):
pass
def a ( a ) ->Union[str, Any]:
'''simple docstring'''
for shard in shards:
for i in range(a ):
yield {"i": i, "shard": shard}
def a ( ) ->List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(os.environ['''RANK'''] )
SCREAMING_SNAKE_CASE = int(os.environ['''WORLD_SIZE'''] )
SCREAMING_SNAKE_CASE = ArgumentParser()
parser.add_argument('''--streaming''' , type=a )
parser.add_argument('''--local_rank''' , type=a )
parser.add_argument('''--num_workers''' , type=a , default=0 )
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = args.streaming
SCREAMING_SNAKE_CASE = args.num_workers
SCREAMING_SNAKE_CASE = {'''shards''': [F"""shard_{shard_idx}""" for shard_idx in range(a )]}
SCREAMING_SNAKE_CASE = IterableDataset.from_generator(a , gen_kwargs=a )
if not streaming:
SCREAMING_SNAKE_CASE = Dataset.from_list(list(a ) )
SCREAMING_SNAKE_CASE = split_dataset_by_node(a , rank=a , world_size=a )
SCREAMING_SNAKE_CASE = torch.utils.data.DataLoader(a , num_workers=a )
SCREAMING_SNAKE_CASE = NUM_SHARDS * NUM_ITEMS_PER_SHARD
SCREAMING_SNAKE_CASE = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
SCREAMING_SNAKE_CASE = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 201
| 1
|
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Dict = [int(snake_case__) for i in ip_va_address.split(".") if i.isdigit()]
return len(snake_case__) == 4 and all(0 <= int(snake_case__) <= 2_54 for octet in octets)
if __name__ == "__main__":
_lowercase = input().strip()
_lowercase = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 683
|
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : List[Any] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Optional[int] = remaining_prefix
lowerCAmelCase_ : Optional[int] = self.nodes[matching_string[0]]
lowerCAmelCase_ : List[Any] = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = aux_node
if remaining_word == "":
lowerCAmelCase_ : List[str] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : str = list(self.nodes.values() )[0]
lowerCAmelCase_ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Tuple = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Union[str, Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def UpperCamelCase ( ):
lowerCAmelCase_ : Dict = "banana bananas bandana band apple all beast".split()
lowerCAmelCase_ : List[Any] = RadixNode()
root.insert_many(snake_case__)
assert all(root.find(snake_case__) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def UpperCamelCase ( ):
assert test_trie()
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = RadixNode()
lowerCAmelCase_ : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(snake_case__)
print("Words:" , snake_case__)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
| 683
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
A : List[str] = logging.get_logger(__name__)
A : str = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = "bloom"
a = ["past_key_values"]
a = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Any=250880 , SCREAMING_SNAKE_CASE : int=64 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : Dict=8 , SCREAMING_SNAKE_CASE : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE : List[Any]=0.02 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : str=2 , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Any=0.0 , SCREAMING_SNAKE_CASE : List[str]=1 , SCREAMING_SNAKE_CASE : Any=False , **SCREAMING_SNAKE_CASE : str , ):
_A : Union[str, Any] = vocab_size
# Backward compatibility with n_embed kwarg
_A : str = kwargs.pop('n_embed' , SCREAMING_SNAKE_CASE)
_A : Any = hidden_size if n_embed is None else n_embed
_A : Optional[int] = n_layer
_A : str = n_head
_A : Optional[Any] = layer_norm_epsilon
_A : Any = initializer_range
_A : List[str] = use_cache
_A : Union[str, Any] = pretraining_tp
_A : Optional[int] = apply_residual_connection_post_layernorm
_A : Union[str, Any] = hidden_dropout
_A : Dict = attention_dropout
_A : Optional[int] = bos_token_id
_A : str = eos_token_id
_A : str = slow_but_exact
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = version.parse("1.12" )
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : PretrainedConfig , SCREAMING_SNAKE_CASE : str = "default" , SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , SCREAMING_SNAKE_CASE : bool = False , ):
super().__init__(SCREAMING_SNAKE_CASE , task=SCREAMING_SNAKE_CASE , patching_specs=SCREAMING_SNAKE_CASE , use_past=SCREAMING_SNAKE_CASE)
if not getattr(self._config , 'pad_token_id' , SCREAMING_SNAKE_CASE):
# TODO: how to do that better?
_A : List[Any] = 0
@property
def A ( self : Tuple):
_A : Tuple = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='inputs' , inverted_values_shape=SCREAMING_SNAKE_CASE)
_A : Optional[int] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_A : Any = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def A ( self : Optional[Any]):
return self._config.n_layer
@property
def A ( self : Tuple):
return self._config.n_head
@property
def A ( self : List[str]):
return 1e-3
def A ( self : int , SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ):
_A : List[Any] = super(SCREAMING_SNAKE_CASE , self).generate_dummy_inputs(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE)
# We need to order the input in the way they appears in the forward()
_A : List[str] = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
_A , _A : Union[str, Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_A : str = seqlen + 2
_A : List[str] = self._config.hidden_size // self.num_attention_heads
_A : Dict = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_A : List[Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_A : Union[str, Any] = [
(torch.zeros(SCREAMING_SNAKE_CASE), torch.zeros(SCREAMING_SNAKE_CASE)) for _ in range(self.num_layers)
]
_A : List[str] = common_inputs['attention_mask']
if self.use_past:
_A : Optional[Any] = ordered_inputs['attention_mask'].dtype
_A : Optional[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE)] , dim=1)
return ordered_inputs
@property
def A ( self : List[str]):
return 13
| 128
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A : List[str] = get_logger(__name__)
class __lowerCamelCase :
"""simple docstring"""
a = "dummy_data"
a = "datasets"
a = False
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[Version, str] , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[List[Callable]] = None , ):
_A : Dict = 0
_A : Dict = dataset_name
_A : Any = cache_dir
_A : List[str] = use_local_dummy_data
_A : Optional[Any] = config
# download_callbacks take a single url as input
_A : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_A : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_A : Any = str(SCREAMING_SNAKE_CASE)
# to be downloaded
_A : Optional[Any] = None
_A : List[str] = None
@property
def A ( self : str):
if self._dummy_file is None:
_A : Tuple = self.download_dummy_data()
return self._dummy_file
@property
def A ( self : Optional[Any]):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name)
@property
def A ( self : Tuple):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip')
def A ( self : int):
_A : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_A : Optional[int] = cached_path(
SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE , force_extract=SCREAMING_SNAKE_CASE)
return os.path.join(SCREAMING_SNAKE_CASE , self.dummy_file_name)
@property
def A ( self : List[str]):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def A ( self : str):
if self._bucket_url is None:
_A : Tuple = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/'))
return self._bucket_url
@property
def A ( self : str):
# return full path if its a dir
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/').split('/')[:-1])
def A ( self : List[str] , SCREAMING_SNAKE_CASE : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any]):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_A : Any = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_A : Dict = self.dummy_file_name
# special case when data_url is a dict
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
return self.create_dummy_data_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple)):
return self.create_dummy_data_list(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
else:
return self.create_dummy_data_single(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : str , *SCREAMING_SNAKE_CASE : str):
return self.download_and_extract(SCREAMING_SNAKE_CASE)
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple):
return self.download_and_extract(SCREAMING_SNAKE_CASE)
def A ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any):
return path
def A ( self : str):
return {}
def A ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]):
_A : List[str] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
for single_url in single_urls:
download_callback(SCREAMING_SNAKE_CASE)
else:
_A : Optional[Any] = single_urls
download_callback(SCREAMING_SNAKE_CASE)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_A : List[Any] = [os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE).name)) for x in single_urls]
else:
_A : str = single_urls
_A : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE).name))
_A : Tuple = value
# make sure that values are unique
if all(isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
_A : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int):
_A : List[str] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_A : Union[str, Any] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , SCREAMING_SNAKE_CASE)) for url in data_url)
_A : Optional[Any] = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
_A : Optional[Any] = [data_url[0]] * len(SCREAMING_SNAKE_CASE)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(SCREAMING_SNAKE_CASE)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_A : Any = os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split('/')[-1]))
dummy_data_list.append(SCREAMING_SNAKE_CASE)
return dummy_data_list
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any):
for download_callback in self.download_callbacks:
download_callback(SCREAMING_SNAKE_CASE)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split('/')[-1]))
if os.path.exists(SCREAMING_SNAKE_CASE) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def A ( self : str):
pass
def A ( self : str):
pass
def A ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any]):
def _iter_archive_members(SCREAMING_SNAKE_CASE : str):
# this preserves the order of the members inside the ZIP archive
_A : Dict = Path(self.dummy_file).parent
_A : Union[str, Any] = path.relative_to(SCREAMING_SNAKE_CASE)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
_A : Any = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(SCREAMING_SNAKE_CASE)
_A : str = Path(SCREAMING_SNAKE_CASE)
_A : Any = _iter_archive_members(SCREAMING_SNAKE_CASE) if self.use_local_dummy_data else path.rglob('*')
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__')):
yield file_path.relative_to(SCREAMING_SNAKE_CASE).as_posix(), file_path.open('rb')
def A ( self : int , SCREAMING_SNAKE_CASE : Tuple):
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_A : Tuple = [paths]
for path in paths:
if os.path.isfile(SCREAMING_SNAKE_CASE):
if os.path.basename(SCREAMING_SNAKE_CASE).startswith(('.', '__')):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(SCREAMING_SNAKE_CASE):
if os.path.basename(SCREAMING_SNAKE_CASE).startswith(('.', '__')):
continue
dirnames.sort()
for filename in sorted(SCREAMING_SNAKE_CASE):
if filename.startswith(('.', '__')):
continue
yield os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
| 128
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class __a ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = 'xlnet'
UpperCAmelCase__ : List[str] = ['mems']
UpperCAmelCase__ : int = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , UpperCamelCase__=32000 , UpperCamelCase__=1024 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=4096 , UpperCamelCase__="gelu" , UpperCamelCase__=True , UpperCamelCase__="bi" , UpperCamelCase__=0.02 , UpperCamelCase__=1E-12 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=-1 , UpperCamelCase__=False , UpperCamelCase__="last" , UpperCamelCase__=True , UpperCamelCase__="tanh" , UpperCamelCase__=0.1 , UpperCamelCase__=5 , UpperCamelCase__=5 , UpperCamelCase__=5 , UpperCamelCase__=1 , UpperCamelCase__=2 , **UpperCamelCase__ , ):
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = n_layer
SCREAMING_SNAKE_CASE_ : int = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = d_model // n_head
SCREAMING_SNAKE_CASE_ : Optional[Any] = ff_activation
SCREAMING_SNAKE_CASE_ : Any = d_inner
SCREAMING_SNAKE_CASE_ : List[Any] = untie_r
SCREAMING_SNAKE_CASE_ : Optional[Any] = attn_type
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE_ : List[Any] = dropout
SCREAMING_SNAKE_CASE_ : Optional[Any] = mem_len
SCREAMING_SNAKE_CASE_ : Optional[int] = reuse_len
SCREAMING_SNAKE_CASE_ : Optional[int] = bi_data
SCREAMING_SNAKE_CASE_ : Optional[Any] = clamp_len
SCREAMING_SNAKE_CASE_ : Tuple = same_length
SCREAMING_SNAKE_CASE_ : int = summary_type
SCREAMING_SNAKE_CASE_ : List[str] = summary_use_proj
SCREAMING_SNAKE_CASE_ : str = summary_activation
SCREAMING_SNAKE_CASE_ : Tuple = summary_last_dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = start_n_top
SCREAMING_SNAKE_CASE_ : List[Any] = end_n_top
SCREAMING_SNAKE_CASE_ : int = bos_token_id
SCREAMING_SNAKE_CASE_ : Any = pad_token_id
SCREAMING_SNAKE_CASE_ : List[Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , UpperCamelCase__ , )
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs["""use_cache"""]
SCREAMING_SNAKE_CASE_ : str = use_mems_eval
SCREAMING_SNAKE_CASE_ : List[str] = use_mems_train
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def __snake_case ( self , UpperCamelCase__ ):
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 714
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __a ( __A ):
'''simple docstring'''
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = SMALL_MODEL_IDENTIFIER
SCREAMING_SNAKE_CASE_ : List[Any] = 'pt'
SCREAMING_SNAKE_CASE_ : Tuple = 'tf'
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : int = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase__ )
model_tf.save_pretrained(UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'mock_framework'
# Framework provided - return whatever the user provides
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = FeaturesManager.determine_framework(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Any = FeaturesManager.determine_framework(UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = MagicMock(return_value=UpperCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : str = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
SCREAMING_SNAKE_CASE_ : List[str] = MagicMock(return_value=UpperCamelCase__ )
with patch('transformers.onnx.features.is_torch_available' , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_tf )
# Both in environment -> use PyTorch
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MagicMock(return_value=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = MagicMock(return_value=UpperCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ), patch(
'transformers.onnx.features.is_torch_available' , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# Both not in environment -> raise error
SCREAMING_SNAKE_CASE_ : Tuple = MagicMock(return_value=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Any = MagicMock(return_value=UpperCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ), patch(
'transformers.onnx.features.is_torch_available' , UpperCamelCase__ ):
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = FeaturesManager.determine_framework(self.test_model )
| 97
| 0
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def A ( snake_case__ , snake_case__=7 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = None
if token is not None:
SCREAMING_SNAKE_CASE__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
SCREAMING_SNAKE_CASE__ = """636036"""
SCREAMING_SNAKE_CASE__ = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
SCREAMING_SNAKE_CASE__ = requests.get(snake_case__ , headers=snake_case__ ).json()
return result["workflow_runs"]
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_daily_ci_runs(snake_case__ )
SCREAMING_SNAKE_CASE__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
SCREAMING_SNAKE_CASE__ = workflow_run["""id"""]
break
return workflow_run_id
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_last_daily_ci_runs(snake_case__ )
if workflow_run_id is not None:
SCREAMING_SNAKE_CASE__ = get_artifacts_links(worflow_run_id=snake_case__ , token=snake_case__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
SCREAMING_SNAKE_CASE__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=snake_case__ , artifact_url=snake_case__ , output_dir=snake_case__ , token=snake_case__ )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
get_last_daily_ci_artifacts(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE__ = {}
for artifact_name in artifact_names:
SCREAMING_SNAKE_CASE__ = os.path.join(snake_case__ , f"""{artifact_name}.zip""" )
if os.path.isfile(snake_case__ ):
SCREAMING_SNAKE_CASE__ = {}
with zipfile.ZipFile(snake_case__ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case__ ):
# read the file
with z.open(snake_case__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read().decode("""UTF-8""" )
return results
| 196
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
A_ : Union[str, Any] = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : int = 'ernie_m'
lowerCamelCase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : List[str] , __UpperCAmelCase : int = 2_5_0_0_0_2 , __UpperCAmelCase : int = 7_6_8 , __UpperCAmelCase : int = 1_2 , __UpperCAmelCase : int = 1_2 , __UpperCAmelCase : int = 3_0_7_2 , __UpperCAmelCase : str = "gelu" , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : int = 5_1_4 , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : float = 1e-05 , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Union[str, Any]=0.0 , **__UpperCAmelCase : Any , ) -> Optional[Any]:
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = classifier_dropout
SCREAMING_SNAKE_CASE__ = is_decoder
SCREAMING_SNAKE_CASE__ = act_dropout
| 196
| 1
|
def _lowerCAmelCase( __A : float , __A : float , __A : int ):
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
UpperCAmelCase = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
UpperCAmelCase = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
lowerCAmelCase__ = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowerCAmelCase__ = {value: key for key, value in encode_dict.items()}
def _lowerCAmelCase( __A ):
UpperCAmelCase = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def _lowerCAmelCase( __A ):
if set(__A ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase = ""
for word in coded.split():
while len(__A ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1
| 0
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 483
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase : int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase : List[str] = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__UpperCAmelCase = bs[:]
__UpperCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase = [chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = set()
__UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase = char
return pairs
class A ( UpperCAmelCase ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ['''input_ids''', '''attention_mask''']
def __init__( self : str , __a : Union[str, Any] , __a : Optional[Any] , __a : List[Any]="replace" , __a : Union[str, Any]="<s>" , __a : Any="</s>" , __a : Dict="</s>" , __a : Dict="<s>" , __a : Tuple="<unk>" , __a : List[str]="<pad>" , __a : Any="<mask>" , __a : Dict=False , **__a : Union[str, Any] , ) -> Optional[int]:
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , )
with open(__a , encoding='''utf-8''' ) as vocab_handle:
__UpperCAmelCase = json.load(__a )
__UpperCAmelCase = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase = errors # how to handle errors in decoding
__UpperCAmelCase = bytes_to_unicode()
__UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__a , encoding='''utf-8''' ) as merges_handle:
__UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
__UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__UpperCAmelCase = {}
__UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
return len(self.encoder )
def snake_case__ ( self : str ) -> int:
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case__ ( self : List[Any] , __a : Tuple ) -> List[Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase = tuple(__a )
__UpperCAmelCase = get_pairs(__a )
if not pairs:
return token
while True:
__UpperCAmelCase = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase = bigram
__UpperCAmelCase = []
__UpperCAmelCase = 0
while i < len(__a ):
try:
__UpperCAmelCase = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase = tuple(__a )
__UpperCAmelCase = new_word
if len(__a ) == 1:
break
else:
__UpperCAmelCase = get_pairs(__a )
__UpperCAmelCase = ''' '''.join(__a )
__UpperCAmelCase = word
return word
def snake_case__ ( self : int , __a : int ) -> List[Any]:
__UpperCAmelCase = []
for token in re.findall(self.pat , __a ):
__UpperCAmelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(''' ''' ) )
return bpe_tokens
def snake_case__ ( self : Optional[Any] , __a : Tuple ) -> str:
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def snake_case__ ( self : Optional[int] , __a : Any ) -> List[str]:
return self.decoder.get(__a )
def snake_case__ ( self : Union[str, Any] , __a : List[str] ) -> List[Any]:
__UpperCAmelCase = ''''''.join(__a )
__UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def snake_case__ ( self : Union[str, Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + '''\n''' )
__UpperCAmelCase = 0
with open(__a , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __a : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__UpperCAmelCase = token_index
writer.write(''' '''.join(__a ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case__ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case__ ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self : int , __a : Optional[int] , __a : int=False , **__a : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
__UpperCAmelCase = ''' ''' + text
return (text, kwargs)
def snake_case__ ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
def snake_case__ ( self : Optional[Any] , __a : "Conversation" ) -> List[int]:
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(__a )
__UpperCAmelCase = ''' '''.join(__a )
__UpperCAmelCase = self.encode(__a )
if len(__a ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 262
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = ShapEPipeline
snake_case__ = ["prompt"]
snake_case__ = ["prompt"]
snake_case__ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
snake_case__ = False
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return 32
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return 32
@property
def lowerCamelCase__ ( self : int ):
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self : List[Any] ):
return 8
@property
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCamelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
__lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCAmelCase )
@property
def lowerCamelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
__lowerCamelCase : Optional[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
__lowerCamelCase : str = PriorTransformer(**UpperCAmelCase )
return model
@property
def lowerCamelCase__ ( self : Dict ):
torch.manual_seed(0 )
__lowerCamelCase : List[Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase : List[str] = ShapERenderer(**UpperCAmelCase )
return model
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Tuple = self.dummy_prior
__lowerCamelCase : List[Any] = self.dummy_text_encoder
__lowerCamelCase : int = self.dummy_tokenizer
__lowerCamelCase : Optional[int] = self.dummy_renderer
__lowerCamelCase : Optional[Any] = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCAmelCase , clip_sample=UpperCAmelCase , clip_sample_range=1.0 , )
__lowerCamelCase : Optional[Any] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=0 ):
if str(UpperCAmelCase ).startswith("mps" ):
__lowerCamelCase : Tuple = torch.manual_seed(UpperCAmelCase )
else:
__lowerCamelCase : Optional[Any] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCamelCase : List[Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Any = "cpu"
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = self.pipeline_class(**UpperCAmelCase )
__lowerCamelCase : Optional[Any] = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Dict = pipe(**self.get_dummy_inputs(UpperCAmelCase ) )
__lowerCamelCase : Tuple = output.images[0]
__lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : Union[str, Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : str = torch_device == "cpu"
__lowerCamelCase : Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCAmelCase , relax_max_difference=UpperCAmelCase , )
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : str = self.get_dummy_components()
__lowerCamelCase : Optional[int] = self.pipeline_class(**UpperCAmelCase )
__lowerCamelCase : int = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : int = 1
__lowerCamelCase : List[str] = 2
__lowerCamelCase : List[Any] = self.get_dummy_inputs(UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase : str = batch_size * [inputs[key]]
__lowerCamelCase : Optional[int] = pipe(**UpperCAmelCase , num_images_per_prompt=UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
__lowerCamelCase : Optional[int] = ShapEPipeline.from_pretrained("openai/shap-e" )
__lowerCamelCase : Union[str, Any] = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : int = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
__lowerCamelCase : Tuple = pipe(
"a shark" , generator=UpperCAmelCase , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
| 366
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _snake_case :
snake_case__ = None
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = None
snake_case__ = None
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = True
snake_case__ = None
snake_case__ = 1
snake_case__ = None
snake_case__ = False
snake_case__ = None
snake_case__ = None
def lowerCamelCase__ ( self : Any ):
return self.__class__(**{k: copy.deepcopy(UpperCAmelCase ) for k, v in self.__dict__.items()} )
| 366
| 1
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Any ):
# Checks if the entire collection has been sorted
if len(_lowerCamelCase ) <= 1 or n <= 1:
return
insert_next(_lowerCamelCase , n - 1 )
rec_insertion_sort(_lowerCamelCase , n - 1 )
def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
# Checks order between adjacent elements
if index >= len(_lowerCamelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
A__, A__ = (
collection[index],
collection[index - 1],
)
insert_next(_lowerCamelCase , index + 1 )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =input("Enter integers separated by spaces: ")
__lowerCAmelCase : list[int] =[int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 440
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[str] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:int = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 528
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase : Tuple = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__UpperCamelCase : Optional[Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Optional[int] = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
__UpperCamelCase : Tuple = """▁"""
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =["input_ids", "attention_mask"]
__a =BarthezTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , **lowerCamelCase , ) ->Optional[int]:
'''simple docstring'''
__a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , **lowerCamelCase , )
__a = vocab_file
__a = False if not self.vocab_file else True
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None ) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None ) ->List[int]:
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None ) ->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a = os.path.join(
lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
| 718
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[Any] ) -> Dict:
"""simple docstring"""
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'] )
__a = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_5_5.0
__a = image[None].transpose(0, 3, 1, 2 )
__a = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
return 2.0 * image - 1.0
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->Dict:
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 100 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , ) ->Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
if isinstance(lowerCamelCase , PIL.Image.Image ):
__a = 1
elif isinstance(lowerCamelCase , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}""" )
if isinstance(lowerCamelCase , PIL.Image.Image ):
__a = preprocess(lowerCamelCase )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
__a = image.to(device=self.device , dtype=lowerCamelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(lowerCamelCase ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
__a = self.unet(lowerCamelCase , lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(lowerCamelCase ).sample
__a = torch.clamp(lowerCamelCase , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 270
| 0
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, *A, **A ):
'''simple docstring'''
super().__init__(*A, **A )
SCREAMING_SNAKE_CASE : Tuple = {}
def UpperCamelCase_ ( self, A, *A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = super().add_tokens(A, *A, **A )
if num_added_tokens == 0:
raise ValueError(
F"The tokenizer already contains the token {placeholder_token}. Please pass a different"
' `placeholder_token` that is not already in the tokenizer.' )
def UpperCamelCase_ ( self, A, *A, A=1, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(A, *A, **A )
output.append(A )
else:
SCREAMING_SNAKE_CASE : Dict = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = placeholder_token + F"_{i}"
self.try_adding_tokens(A, *A, **A )
output.append(A )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"The tokenizer already has placeholder token {token} that can get confused with"
F" {placeholder_token}keep placeholder tokens independent" )
SCREAMING_SNAKE_CASE : Dict = output
def UpperCamelCase_ ( self, A, A=False, A=1.0 ):
'''simple docstring'''
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(len(A ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=A ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE : Tuple = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE : int = tokens[: 1 + int(len(A ) * prop_tokens_to_load )]
if vector_shuffle:
SCREAMING_SNAKE_CASE : Union[str, Any] = copy.copy(A )
random.shuffle(A )
SCREAMING_SNAKE_CASE : Dict = text.replace(A, ' '.join(A ) )
return text
def __call__( self, A, *A, A=False, A=1.0, **A ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
A, vector_shuffle=A, prop_tokens_to_load=A ), *A, **A, )
def UpperCamelCase_ ( self, A, *A, A=False, A=1.0, **A ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
A, vector_shuffle=A, prop_tokens_to_load=A ), *A, **A, )
| 28
|
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A )
def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet(
A, A, A, A, A, A, A, A, A, A, A, )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A, A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, )
idx += 1
SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}"
@classmethod
def UpperCamelCase_ ( cls, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path
while os.path.isdir(A ):
SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A )
controlnets.append(A )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}"
logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." )
if len(A ) == 0:
raise ValueError(
F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(A )
| 28
| 1
|
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
| 670
| 1
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCAmelCase : int = 16
__lowerCAmelCase : List[Any] = 32
def __lowerCAmelCase ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 1_6 , __UpperCamelCase : str = "bert-base-cased" ):
'''simple docstring'''
snake_case_ : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case_ : List[str] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCamelCase : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ : Tuple = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=SCREAMING_SNAKE_CASE_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : Dict = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCamelCase : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case_ : Union[str, Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
snake_case_ : Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] ):
'''simple docstring'''
model.eval()
snake_case_ : List[Any] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
snake_case_ : Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case_ , snake_case_ : List[str] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE_ ) - 1:
snake_case_ : int = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case_ : List[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
snake_case_ : Any = metric.compute()
return eval_metric["accuracy"]
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : Any = config["""lr"""]
snake_case_ : Tuple = int(config["""num_epochs"""] )
snake_case_ : List[Any] = int(config["""seed"""] )
snake_case_ : Union[str, Any] = int(config["""batch_size"""] )
snake_case_ : Dict = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE_ )
snake_case_ , snake_case_ : Any = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Tuple = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
# Instantiate optimizer
snake_case_ : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case_ : int = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
if accelerator.state.deepspeed_plugin is not None:
snake_case_ : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
snake_case_ : Dict = 1
snake_case_ : Dict = (len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case_ : Any = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE_ , )
else:
snake_case_ : str = DummyScheduler(SCREAMING_SNAKE_CASE_ , total_num_steps=SCREAMING_SNAKE_CASE_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# We need to keep track of how many total steps we have iterated over
snake_case_ : Optional[Any] = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case_ : List[str] = 0
snake_case_ : str = evaluate.load("""glue""" , """mrpc""" )
snake_case_ : int = num_epochs
if args.partial_train_epoch is not None:
snake_case_ : Union[str, Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case_ : List[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
snake_case_ : Dict = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case_ : Tuple = int(SCREAMING_SNAKE_CASE_ ) + 1
snake_case_ : Union[str, Any] = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.print("""resumed checkpoint performance:""" , SCREAMING_SNAKE_CASE_ )
accelerator.print("""resumed checkpoint\'s scheduler\'s lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers\'s lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , """r""" ) as f:
snake_case_ : Union[str, Any] = json.load(SCREAMING_SNAKE_CASE_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case_ : Optional[Any] = {}
for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
snake_case_ : Tuple = model(**SCREAMING_SNAKE_CASE_ )
snake_case_ : Union[str, Any] = outputs.loss
snake_case_ : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case_ : Tuple = F'epoch_{epoch}'
snake_case_ : Optional[int] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
snake_case_ : str = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
snake_case_ : str = accuracy
snake_case_ : Any = lr_scheduler.get_lr()[0]
snake_case_ : Optional[int] = optimizer.param_groups[0]["""lr"""]
snake_case_ : List[str] = epoch
snake_case_ : Optional[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , SCREAMING_SNAKE_CASE_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , """w""" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=SCREAMING_SNAKE_CASE_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=SCREAMING_SNAKE_CASE_ , )
parser.add_argument(
"""--output_dir""" , type=SCREAMING_SNAKE_CASE_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=SCREAMING_SNAKE_CASE_ , default=2 , help="""Number of train epochs.""" , )
snake_case_ : int = parser.parse_args()
snake_case_ : Optional[int] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 58
|
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowercase (*SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Union[Dict, Any]] = None , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 ) -> Optional[int]:
from .. import __version__
SCREAMING_SNAKE_CASE = take_from
SCREAMING_SNAKE_CASE = ()
if not isinstance(args[0] , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
SCREAMING_SNAKE_CASE = None
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(SCREAMING_SNAKE_CASE_ ),)
SCREAMING_SNAKE_CASE = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
values += (getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),)
SCREAMING_SNAKE_CASE = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
SCREAMING_SNAKE_CASE = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
SCREAMING_SNAKE_CASE = warning + ' ' if standard_warn else ''
warnings.warn(warning + message , SCREAMING_SNAKE_CASE_ , stacklevel=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) > 0:
SCREAMING_SNAKE_CASE = inspect.getouterframes(inspect.currentframe() )[1]
SCREAMING_SNAKE_CASE = call_frame.filename
SCREAMING_SNAKE_CASE = call_frame.lineno
SCREAMING_SNAKE_CASE = call_frame.function
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return
elif len(SCREAMING_SNAKE_CASE_ ) == 1:
return values[0]
return values
| 247
| 0
|
import unittest
from knapsack import knapsack as k
class __snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = [0]
lowerCAmelCase__ = [0]
lowerCAmelCase__ = len(a_ )
self.assertEqual(k.knapsack(a_ ,a_ ,a_ ,a_ ) ,0 )
lowerCAmelCase__ = [60]
lowerCAmelCase__ = [10]
lowerCAmelCase__ = len(a_ )
self.assertEqual(k.knapsack(a_ ,a_ ,a_ ,a_ ) ,0 )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 3
lowerCAmelCase__ = [1, 2, 3]
lowerCAmelCase__ = [3, 2, 1]
lowerCAmelCase__ = len(a_ )
self.assertEqual(k.knapsack(a_ ,a_ ,a_ ,a_ ) ,5 )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 50
lowerCAmelCase__ = [60, 100, 120]
lowerCAmelCase__ = [10, 20, 30]
lowerCAmelCase__ = len(a_ )
self.assertEqual(k.knapsack(a_ ,a_ ,a_ ,a_ ) ,220 )
if __name__ == "__main__":
unittest.main()
| 703
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = "x" , snake_case__ = 10**-10 , snake_case__ = 1 , ) -> complex:
"""simple docstring"""
lowerCAmelCase__ = symbols(snake_case__ )
lowerCAmelCase__ = lambdify(snake_case__ , snake_case__ )
lowerCAmelCase__ = lambdify(snake_case__ , diff(snake_case__ , snake_case__ ) )
lowerCAmelCase__ = starting_point
while True:
if diff_function(snake_case__ ) != 0:
lowerCAmelCase__ = prev_guess - multiplicity * func(snake_case__ ) / diff_function(
snake_case__ )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowerCAmelCase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 604
| 0
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
SCREAMING_SNAKE_CASE__ = LxmertConfig.from_json_file(snake_case__ )
print(f'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE__ = LxmertForPreTraining(snake_case__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , snake_case__ )
if __name__ == "__main__":
_A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_A : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 100
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class snake_case__ ( TensorFormatter[Mapping, "torch.Tensor", Mapping]):
'''simple docstring'''
def __init__( self , a__=None , **a__ ) -> Tuple:
'''simple docstring'''
super().__init__(features=a__ )
__snake_case :int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def __lowercase ( self , a__ ) -> List[str]:
'''simple docstring'''
import torch
if isinstance(a__ , a__ ) and column:
if all(
isinstance(a__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(a__ )
return column
def __lowercase ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
import torch
if isinstance(a__ , (str, bytes, type(a__ )) ):
return value
elif isinstance(a__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__snake_case :Optional[int] = {}
if isinstance(a__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__snake_case :List[Any] = {"""dtype""": torch.intaa}
elif isinstance(a__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__snake_case :Tuple = {"""dtype""": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a__ , PIL.Image.Image ):
__snake_case :Union[str, Any] = np.asarray(a__ )
return torch.tensor(a__ , **{**default_dtype, **self.torch_tensor_kwargs} )
def __lowercase ( self , a__ ) -> str:
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(a__ , """__array__""" ) and not isinstance(a__ , torch.Tensor ):
__snake_case :Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a__ , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(a__ ) for substruct in data_struct] )
elif isinstance(a__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a__ ) for substruct in data_struct] )
return self._tensorize(a__ )
def __lowercase ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
return map_nested(self._recursive_tensorize , a__ , map_list=a__ )
def __lowercase ( self , a__ ) -> Mapping:
'''simple docstring'''
__snake_case :Tuple = self.numpy_arrow_extractor().extract_row(a__ )
__snake_case :Any = self.python_features_decoder.decode_row(a__ )
return self.recursive_tensorize(a__ )
def __lowercase ( self , a__ ) -> "torch.Tensor":
'''simple docstring'''
__snake_case :List[str] = self.numpy_arrow_extractor().extract_column(a__ )
__snake_case :List[Any] = self.python_features_decoder.decode_column(a__ , pa_table.column_names[0] )
__snake_case :Optional[Any] = self.recursive_tensorize(a__ )
__snake_case :Any = self._consolidate(a__ )
return column
def __lowercase ( self , a__ ) -> Mapping:
'''simple docstring'''
__snake_case :Optional[int] = self.numpy_arrow_extractor().extract_batch(a__ )
__snake_case :Tuple = self.python_features_decoder.decode_batch(a__ )
__snake_case :Tuple = self.recursive_tensorize(a__ )
for column_name in batch:
__snake_case :str = self._consolidate(batch[column_name] )
return batch
| 455
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = False
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase = CLIPTextModel(_snake_case )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case_ ( self , _snake_case , _snake_case=0 ) -> Any:
"""simple docstring"""
if str(_snake_case ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(_snake_case )
else:
UpperCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
UpperCAmelCase = sag_pipe.to(_snake_case )
sag_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase = sag_pipe.to(_snake_case )
sag_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase = sag_pipe.to(_snake_case )
sag_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , width=768 , height=512 , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
UpperCAmelCase = output.images
assert image.shape == (1, 512, 768, 3)
| 391
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__magic_name__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
__magic_name__ = []
__magic_name__ = []
__magic_name__ = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
__magic_name__ = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
"emoji": True,
},
}
]
__magic_name__ = 0
for log in Path().glob("*.log"):
__magic_name__ = 0
with open(log, "r") as f:
for line in f:
__magic_name__ = json.loads(line)
if line.get("nodeid", "") != "":
__magic_name__ = line["nodeid"]
if line.get("duration", None) is not None:
__magic_name__ = f'''{line["duration"]:.4f}'''
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__magic_name__ = []
log.unlink()
__magic_name__ = ""
__magic_name__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
__magic_name__ = []
__magic_name__ = {}
for test in failed_tests:
__magic_name__ = test[0].split("::")
__magic_name__ = data[0].split("/")[-1]
if data[0] not in filesafailed:
__magic_name__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__magic_name__ = [test[0] for test in failed_table]
__magic_name__ = list(set(files))
# Count number of instances in failed_tests
__magic_name__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__magic_name__ = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__magic_name__ = "Too many failed tests, please see the full report in the Action results."
__magic_name__ = len(err) + 10
__magic_name__ = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
__magic_name__ = "No failed tests! 🤗"
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
__magic_name__ = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
__magic_name__ = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
__magic_name__ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
__magic_name__ = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__magic_name__ = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__magic_name__ = row[0]
else:
__magic_name__ = ""
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 391
| 1
|
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Tuple = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
lowercase__ , lowercase__ : Optional[Any] = get_aligned_output_features_output_indices(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ['''c'''] )
self.assertEqual(__lowerCAmelCase , [2] )
# Out indices set to match out features
lowercase__ , lowercase__ : int = get_aligned_output_features_output_indices(['''a''', '''c'''] , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ['''a''', '''c'''] )
self.assertEqual(__lowerCAmelCase , [0, 2] )
# Out features set to match out indices
lowercase__ , lowercase__ : Union[str, Any] = get_aligned_output_features_output_indices(__lowerCAmelCase , [0, 2] , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ['''a''', '''c'''] )
self.assertEqual(__lowerCAmelCase , [0, 2] )
# Out features selected from negative indices
lowercase__ , lowercase__ : Tuple = get_aligned_output_features_output_indices(__lowerCAmelCase , [-3, -1] , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ['''a''', '''c'''] )
self.assertEqual(__lowerCAmelCase , [-3, -1] )
def _lowerCAmelCase( self ) -> Union[str, Any]:
# Stage names must be set
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , __lowerCAmelCase )
# Out features must be a list
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(__lowerCAmelCase , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(__lowerCAmelCase , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[Any] = BackboneMixin()
lowercase__ : List[str] = ['''a''', '''b''', '''c''']
lowercase__ : Any = ['''a''', '''c''']
lowercase__ : Union[str, Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowercase__ : Tuple = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowercase__ : str = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 152
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
class UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase = 3 , __lowerCAmelCase = 3 , __lowerCAmelCase = ("DownEncoderBlock2D",) , __lowerCAmelCase = ("UpDecoderBlock2D",) , __lowerCAmelCase = (64,) , __lowerCAmelCase = 1 , __lowerCAmelCase = "silu" , __lowerCAmelCase = 3 , __lowerCAmelCase = 32 , __lowerCAmelCase = 256 , __lowerCAmelCase = 32 , __lowerCAmelCase = None , __lowerCAmelCase = 0.1_8_2_1_5 , __lowerCAmelCase = "group" , ) -> Any:
super().__init__()
# pass init params to Encoder
lowercase__ : Union[str, Any] = Encoder(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , down_block_types=__lowerCAmelCase , block_out_channels=__lowerCAmelCase , layers_per_block=__lowerCAmelCase , act_fn=__lowerCAmelCase , norm_num_groups=__lowerCAmelCase , double_z=__lowerCAmelCase , )
lowercase__ : str = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowercase__ : Tuple = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , 1 )
lowercase__ : Dict = VectorQuantizer(__lowerCAmelCase , __lowerCAmelCase , beta=0.2_5 , remap=__lowerCAmelCase , sane_index_shape=__lowerCAmelCase )
lowercase__ : List[Any] = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , 1 )
# pass init params to Decoder
lowercase__ : Optional[Any] = Decoder(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , up_block_types=__lowerCAmelCase , block_out_channels=__lowerCAmelCase , layers_per_block=__lowerCAmelCase , act_fn=__lowerCAmelCase , norm_num_groups=__lowerCAmelCase , norm_type=__lowerCAmelCase , )
@apply_forward_hook
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = True ) -> VQEncoderOutput:
lowercase__ : Optional[int] = self.encoder(__lowerCAmelCase )
lowercase__ : Tuple = self.quant_conv(__lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__lowerCAmelCase )
@apply_forward_hook
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowercase__ , lowercase__ , lowercase__ : str = self.quantize(__lowerCAmelCase )
else:
lowercase__ : int = h
lowercase__ : Optional[int] = self.post_quant_conv(__lowerCAmelCase )
lowercase__ : Union[str, Any] = self.decoder(__lowerCAmelCase , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowercase__ : List[str] = sample
lowercase__ : Optional[Any] = self.encode(__lowerCAmelCase ).latents
lowercase__ : str = self.decode(__lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
| 152
| 1
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case__ : Dict = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class _a ( A__ ):
"""simple docstring"""
def __init__( self , *_snake_case , **_snake_case ):
super().__init__(*_snake_case , **_snake_case )
self.check_model_type(_snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ):
_UpperCAmelCase , _UpperCAmelCase ={}, {}
if padding is not None:
_UpperCAmelCase =padding
if truncation is not None:
_UpperCAmelCase =truncation
if top_k is not None:
_UpperCAmelCase =top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _snake_case , _snake_case = None , **_snake_case ):
if isinstance(_snake_case , (Image.Image, str) ) and isinstance(_snake_case , _snake_case ):
_UpperCAmelCase ={"image": image, "question": question}
else:
_UpperCAmelCase =image
_UpperCAmelCase =super().__call__(_snake_case , **_snake_case )
return results
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case=False , _snake_case=False ):
_UpperCAmelCase =load_image(inputs["image"] )
_UpperCAmelCase =self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case )
_UpperCAmelCase =self.image_processor(images=_snake_case , return_tensors=self.framework )
model_inputs.update(_snake_case )
return model_inputs
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
_UpperCAmelCase =self.model(**_snake_case )
return model_outputs
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case=5 ):
if top_k > self.model.config.num_labels:
_UpperCAmelCase =self.model.config.num_labels
if self.framework == "pt":
_UpperCAmelCase =model_outputs.logits.sigmoid()[0]
_UpperCAmelCase , _UpperCAmelCase =probs.topk(_snake_case )
else:
raise ValueError(F"Unsupported framework: {self.framework}" )
_UpperCAmelCase =scores.tolist()
_UpperCAmelCase =ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case )]
| 718
|
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592
| 0
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _a ( unittest.TestCase ):
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: Optional[int] = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> int:
UpperCAmelCase_: Optional[Any] = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> str:
UpperCAmelCase_: Any = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Dict:
UpperCAmelCase_: Optional[int] = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Dict:
UpperCAmelCase_: List[str] = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[Any] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
UpperCAmelCase_: Optional[int] = """fp16"""
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: int = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
UpperCAmelCase_: Union[str, Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Dict:
# pass variant but use the non-variant filenames
UpperCAmelCase_: List[Any] = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
UpperCAmelCase_: List[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Any:
UpperCAmelCase_: Tuple = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCAmelCase_: Optional[Any] = """fp16"""
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Any = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
UpperCAmelCase_: Any = """fp16"""
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Union[str, Any]:
# pass variant but use the non-variant filenames
UpperCAmelCase_: Tuple = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
UpperCAmelCase_: Tuple = """fp16"""
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> str:
UpperCAmelCase_: Union[str, Any] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
UpperCAmelCase_: Dict = """fp16"""
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_ ) )
| 556
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _a ( _lowerCAmelCase ):
A = ['''pixel_values''']
def __init__(self, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = 1 / 255, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = True, **SCREAMING_SNAKE_CASE_, ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = size if size is not None else {"""shortest_edge""": 224}
UpperCAmelCase_: int = get_size_dict(SCREAMING_SNAKE_CASE_, default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase_: Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_, default_to_square=SCREAMING_SNAKE_CASE_, param_name="""crop_size""" )
UpperCAmelCase_: Optional[int] = do_resize
UpperCAmelCase_: Optional[int] = size
UpperCAmelCase_: Optional[int] = resample
UpperCAmelCase_: Tuple = do_center_crop
UpperCAmelCase_: Optional[int] = crop_size
UpperCAmelCase_: str = do_rescale
UpperCAmelCase_: Optional[int] = rescale_factor
UpperCAmelCase_: List[str] = do_normalize
UpperCAmelCase_: Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_: Dict = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_: Any = do_convert_rgb
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
UpperCAmelCase_: Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_, default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
UpperCAmelCase_: Optional[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_, size=size["""shortest_edge"""], default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_, resample=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
UpperCAmelCase_: Any = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE_, size=(size["""height"""], size["""width"""]), data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
return rescale(SCREAMING_SNAKE_CASE_, scale=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE_, mean=SCREAMING_SNAKE_CASE_, std=SCREAMING_SNAKE_CASE_, data_format=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST, **SCREAMING_SNAKE_CASE_, ) -> PIL.Image.Image:
UpperCAmelCase_: Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_: Tuple = size if size is not None else self.size
UpperCAmelCase_: str = get_size_dict(SCREAMING_SNAKE_CASE_, param_name="""size""", default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = resample if resample is not None else self.resample
UpperCAmelCase_: Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_: Tuple = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_: int = get_size_dict(SCREAMING_SNAKE_CASE_, param_name="""crop_size""", default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_: int = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_: Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_: Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_: int = image_std if image_std is not None else self.image_std
UpperCAmelCase_: Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_: List[str] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_: Any = [convert_to_rgb(SCREAMING_SNAKE_CASE_ ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_: List[str] = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
UpperCAmelCase_: str = [self.resize(image=SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_, resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
UpperCAmelCase_: Dict = [self.center_crop(image=SCREAMING_SNAKE_CASE_, size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCAmelCase_: Optional[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE_, scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
UpperCAmelCase_: Dict = [self.normalize(image=SCREAMING_SNAKE_CASE_, mean=SCREAMING_SNAKE_CASE_, std=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCAmelCase_: Tuple = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCAmelCase_: Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_, tensor_type=SCREAMING_SNAKE_CASE_ )
| 556
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = SwinvaConfig()
snake_case_ : Union[str, Any] = swinva_name.split("""_""" )
snake_case_ : List[Any] = name_split[1]
if "to" in name_split[3]:
snake_case_ : Union[str, Any] = int(name_split[3][-3:] )
else:
snake_case_ : Optional[int] = int(name_split[3] )
if "to" in name_split[2]:
snake_case_ : Dict = int(name_split[2][-2:] )
else:
snake_case_ : int = int(name_split[2][6:] )
if model_size == "tiny":
snake_case_ : str = 9_6
snake_case_ : Dict = (2, 2, 6, 2)
snake_case_ : int = (3, 6, 1_2, 2_4)
elif model_size == "small":
snake_case_ : Tuple = 9_6
snake_case_ : Optional[Any] = (2, 2, 1_8, 2)
snake_case_ : Optional[int] = (3, 6, 1_2, 2_4)
elif model_size == "base":
snake_case_ : Dict = 1_2_8
snake_case_ : str = (2, 2, 1_8, 2)
snake_case_ : List[Any] = (4, 8, 1_6, 3_2)
else:
snake_case_ : Tuple = 1_9_2
snake_case_ : Tuple = (2, 2, 1_8, 2)
snake_case_ : Optional[Any] = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
snake_case_ : str = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
snake_case_ : str = 2_1_8_4_1
snake_case_ : str = """huggingface/label-files"""
snake_case_ : List[str] = """imagenet-22k-id2label.json"""
snake_case_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : Optional[int] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()}
else:
snake_case_ : int = 1_0_0_0
snake_case_ : List[Any] = """huggingface/label-files"""
snake_case_ : List[Any] = """imagenet-1k-id2label.json"""
snake_case_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : Union[str, Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ : Dict = idalabel
snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = img_size
snake_case_ : int = num_classes
snake_case_ : str = embed_dim
snake_case_ : Dict = depths
snake_case_ : int = num_heads
snake_case_ : Optional[Any] = window_size
return config
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if "patch_embed.proj" in name:
snake_case_ : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case_ : Union[str, Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
snake_case_ : List[str] = """encoder.""" + name
if "attn.proj" in name:
snake_case_ : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case_ : Dict = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case_ : List[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case_ : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case_ : int = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case_ : List[Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
snake_case_ : Optional[Any] = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
snake_case_ : List[Any] = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
snake_case_ : int = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
snake_case_ : str = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
snake_case_ : Tuple = """layernorm.weight"""
if name == "norm.bias":
snake_case_ : int = """layernorm.bias"""
if "head" in name:
snake_case_ : Union[str, Any] = name.replace("""head""" , """classifier""" )
else:
snake_case_ : Optional[Any] = """swinv2.""" + name
return name
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case_ : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "mask" in key:
continue
elif "qkv" in key:
snake_case_ : List[Any] = key.split(""".""" )
snake_case_ : str = int(key_split[1] )
snake_case_ : Optional[Any] = int(key_split[3] )
snake_case_ : List[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case_ : Any = val[:dim, :]
snake_case_ : Optional[Any] = val[dim : dim * 2, :]
snake_case_ : int = val[-dim:, :]
else:
snake_case_ : List[str] = val[:dim]
snake_case_ : Tuple = val[
dim : dim * 2
]
snake_case_ : Union[str, Any] = val[-dim:]
else:
snake_case_ : int = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Optional[Any] = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ )
timm_model.eval()
snake_case_ : Any = get_swinva_config(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = SwinvaForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case_ : Any = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
snake_case_ : Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
snake_case_ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
snake_case_ : Optional[int] = timm_model(inputs["""pixel_values"""] )
snake_case_ : List[str] = model(**SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 709
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
| 48
| 0
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class a ( _lowerCamelCase ):
"""simple docstring"""
A__ : int = '''encodec'''
def __init__( self , snake_case_=[1.5, 3.0, 6.0, 12.0, 24.0] , snake_case_=24000 , snake_case_=1 , snake_case_=False , snake_case_=None , snake_case_=None , snake_case_=128 , snake_case_=32 , snake_case_=1 , snake_case_=[8, 5, 4, 2] , snake_case_="weight_norm" , snake_case_=7 , snake_case_=7 , snake_case_=3 , snake_case_=2 , snake_case_=True , snake_case_="reflect" , snake_case_=2 , snake_case_=2 , snake_case_=1.0 , snake_case_=1024 , snake_case_=None , snake_case_=True , **snake_case_ , ) -> List[Any]:
_UpperCAmelCase = target_bandwidths
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = audio_channels
_UpperCAmelCase = normalize
_UpperCAmelCase = chunk_length_s
_UpperCAmelCase = overlap
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_filters
_UpperCAmelCase = num_residual_layers
_UpperCAmelCase = upsampling_ratios
_UpperCAmelCase = norm_type
_UpperCAmelCase = kernel_size
_UpperCAmelCase = last_kernel_size
_UpperCAmelCase = residual_kernel_size
_UpperCAmelCase = dilation_growth_rate
_UpperCAmelCase = use_causal_conv
_UpperCAmelCase = pad_mode
_UpperCAmelCase = compress
_UpperCAmelCase = num_lstm_layers
_UpperCAmelCase = trim_right_ratio
_UpperCAmelCase = codebook_size
_UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
_UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_A )
@property
def __A ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __A ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __A ( self ) -> int:
_UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __A ( self ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 426
|
import unittest
from knapsack import greedy_knapsack as kp
class _A ( unittest.TestCase ):
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : Dict = [10, 20, 30, 40, 50, 60]
lowercase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowercase : Optional[int] = 100
self.assertEqual(kp.calc_profit(_A , _A , _A ) , 210 )
def __a ( self : Dict ) -> int:
"""simple docstring"""
self.assertRaisesRegex(_A , '''max_weight must greater than zero.''' )
def __a ( self : str ) -> Dict:
"""simple docstring"""
self.assertRaisesRegex(_A , '''Weight can not be negative.''' )
def __a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertRaisesRegex(_A , '''Profit can not be negative.''' )
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
self.assertRaisesRegex(_A , '''max_weight must greater than zero.''' )
def __a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self.assertRaisesRegex(
_A , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 217
| 0
|
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( a : int ) ->list[int]:
snake_case = [True] * limit
snake_case = False
snake_case = False
snake_case = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
snake_case = i * 2
while index < limit:
snake_case = False
snake_case = index + i
snake_case = [2]
for i in range(3 , __lowerCAmelCase , 2 ):
if is_prime[i]:
primes.append(__lowerCAmelCase )
return primes
def __UpperCamelCase ( a : Tuple = 100_0000 ) ->int:
snake_case = prime_sieve(__lowerCAmelCase )
snake_case = 0
snake_case = 0
for i in range(len(__lowerCAmelCase ) ):
for j in range(i + length , len(__lowerCAmelCase ) ):
snake_case = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
snake_case = j - i
snake_case = sol
return largest
if __name__ == "__main__":
print(f'{solution() = }')
| 711
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 44
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_lowerCamelCase = ['gpt2']
_lowerCamelCase = 'gpt2'
if is_tf_available():
class UpperCamelCase_ ( tf.Module ):
def __init__( self :Optional[Any] , __A :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = tokenizer
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__A )
SCREAMING_SNAKE_CASE__ = TFGPTaLMHeadModel.from_config(__A )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def _snake_case ( self :Optional[int] , __A :int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A )
SCREAMING_SNAKE_CASE__ = tokenized["""input_ids"""].to_tensor()
SCREAMING_SNAKE_CASE__ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
SCREAMING_SNAKE_CASE__ = self.model(input_ids=__A , attention_mask=__A )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :List[Any] ) -> Dict:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = [GPTaTokenizer.from_pretrained(__A ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
SCREAMING_SNAKE_CASE__ = [TFGPTaTokenizer.from_pretrained(__A ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
SCREAMING_SNAKE_CASE__ = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
SCREAMING_SNAKE_CASE__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
SCREAMING_SNAKE_CASE__ = tokenizer([test_inputs] , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE__ = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
SCREAMING_SNAKE_CASE__ = python_outputs[key].numpy()
SCREAMING_SNAKE_CASE__ = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__A , tf.intaa ) == tf_outputs_values ) )
@slow
def _snake_case ( self :Any ) -> int:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = tf.function(__A )
for test_inputs in self.test_sentences:
SCREAMING_SNAKE_CASE__ = tf.constant(__A )
SCREAMING_SNAKE_CASE__ = compiled_tokenizer(__A )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _snake_case ( self :List[Any] ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = ModelToSave(tokenizer=__A )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = model.serving(__A ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE__ = Path(__A ) / """saved.model"""
tf.saved_model.save(__A , __A , signatures={"""serving_default""": model.serving} )
SCREAMING_SNAKE_CASE__ = tf.saved_model.load(__A )
SCREAMING_SNAKE_CASE__ = loaded_model.signatures["""serving_default"""](__A )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _snake_case ( self :Dict ) -> int:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A ) # Build model with some sample inputs
SCREAMING_SNAKE_CASE__ = tf_tokenizer.get_config()
SCREAMING_SNAKE_CASE__ = TFGPTaTokenizer.from_config(__A )
SCREAMING_SNAKE_CASE__ = model_from_config(__A )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _snake_case ( self :List[Any] ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
SCREAMING_SNAKE_CASE__ = 12_3123
for max_length in [3, 5, 1024]:
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A , max_length=__A )
SCREAMING_SNAKE_CASE__ = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
| 6
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCamelCase = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6
| 1
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( lowercase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = (DPMSolverSDEScheduler,)
SCREAMING_SNAKE_CASE_ : Dict = 1_0
def a_ ( self : Union[str, Any] , **UpperCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**UpperCAmelCase_ )
return config
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_ )
def a_ ( self : Any ) -> int:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_ )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_ )
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config()
_UpperCAmelCase : str = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase : List[Any] = self.dummy_model()
_UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase : Optional[int] = sample.to(UpperCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : str = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : List[str] = model(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Dict = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Optional[int] = output.prev_sample
_UpperCAmelCase : int = torch.sum(torch.abs(UpperCAmelCase_ ) )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(UpperCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def a_ ( self : Tuple ) -> str:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCAmelCase : Any = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase : Tuple = self.dummy_model()
_UpperCAmelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase : Any = sample.to(UpperCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : str = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Optional[Any] = model(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : List[str] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : str = output.prev_sample
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(UpperCAmelCase_ ) )
_UpperCAmelCase : List[str] = torch.mean(torch.abs(UpperCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def a_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config()
_UpperCAmelCase : Any = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_ )
_UpperCAmelCase : List[Any] = self.dummy_model()
_UpperCAmelCase : Optional[int] = self.dummy_sample_deter.to(UpperCAmelCase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCAmelCase : Tuple = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : List[Any] = model(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : int = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : List[str] = output.prev_sample
_UpperCAmelCase : Any = torch.sum(torch.abs(UpperCAmelCase_ ) )
_UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(UpperCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config()
_UpperCAmelCase : int = scheduler_class(**UpperCAmelCase_ , use_karras_sigmas=UpperCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_ )
_UpperCAmelCase : Dict = self.dummy_model()
_UpperCAmelCase : int = self.dummy_sample_deter.to(UpperCAmelCase_ ) * scheduler.init_noise_sigma
_UpperCAmelCase : Optional[int] = sample.to(UpperCAmelCase_ )
for t in scheduler.timesteps:
_UpperCAmelCase : Optional[Any] = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Dict = model(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Optional[int] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Tuple = output.prev_sample
_UpperCAmelCase : List[Any] = torch.sum(torch.abs(UpperCAmelCase_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
| 709
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase_ ):
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) -> None:
'''simple docstring'''
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 416
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( UpperCamelCase__ ):
_lowercase : str = ['''image_processor''', '''tokenizer''']
_lowercase : Any = '''CLIPImageProcessor'''
_lowercase : str = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self: str , UpperCamelCase_: Any=None , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCamelCase_ , )
lowercase__ = kwargs.pop('''feature_extractor''' )
lowercase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
def __call__( self: int , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Union[str, Any] ) -> Tuple:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ = self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if images is not None:
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase_ ) , tensor_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: int ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , *UpperCamelCase_: str , **UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 43
|
from __future__ import annotations
def lowerCamelCase__ ( __A :list[float] ,__A :Union[str, Any] ):
"""simple docstring"""
print(F'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(__A ):
print(F'{i}\t\t{d}' )
def lowerCamelCase__ ( __A :list[dict[str, int]] ,__A :list[float] ,__A :int ):
"""simple docstring"""
for j in range(__A ):
__snake_case , __snake_case , __snake_case = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def lowerCamelCase__ ( __A :list[dict[str, int]] ,__A :int ,__A :int ,__A :int ):
"""simple docstring"""
__snake_case = [float("""inf""" )] * vertex_count
__snake_case = 0.0
for _ in range(vertex_count - 1 ):
for j in range(__A ):
__snake_case , __snake_case , __snake_case = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
__snake_case = distance[u] + w
__snake_case = check_negative_cycle(__A ,__A ,__A )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = int(input('''Enter number of vertices: ''').strip())
UpperCamelCase__ = int(input('''Enter number of edges: ''').strip())
UpperCamelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
UpperCamelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight}
UpperCamelCase__ = int(input('''\nEnter shortest path source:''').strip())
UpperCamelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 268
| 0
|
import math
import unittest
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> bool:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
with self.assertRaises(_a ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 713
|
from string import ascii_lowercase, ascii_uppercase
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str:
if not sentence:
return ""
snake_case__ = dict(zip(__lowerCAmelCase , __lowerCAmelCase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 208
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.