code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase)
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__magic_name__ : ClassVar[Features] = Features({"""image""": Image()})
__magic_name__ : ClassVar[Features] = Features({"""labels""": ClassLabel})
__magic_name__ : str = "image"
__magic_name__ : str = "labels"
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , UpperCamelCase__ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
A__ : Any =copy.deepcopy(self )
A__ : List[Any] =self.label_schema.copy()
A__ : Optional[int] =features[self.label_column]
A__ : Optional[Any] =label_schema
return task_template
@property
def _UpperCAmelCase ( self : Dict ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 656 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Optional[Any] = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = """gpt_neo"""
__magic_name__ : Union[str, Any] = ["""past_key_values"""]
__magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Dict , UpperCamelCase__ : List[Any]=50257 , UpperCamelCase__ : Optional[Any]=2048 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : int=24 , UpperCamelCase__ : Dict=[[["global", "local"], 12]] , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=50256 , UpperCamelCase__ : List[str]=50256 , **UpperCamelCase__ : str , ):
A__ : Optional[Any] =vocab_size
A__ : Dict =max_position_embeddings
A__ : List[str] =hidden_size
A__ : List[Any] =num_layers
A__ : Tuple =num_heads
A__ : List[str] =intermediate_size
A__ : Tuple =window_size
A__ : Dict =activation_function
A__ : str =resid_dropout
A__ : Union[str, Any] =embed_dropout
A__ : List[str] =attention_dropout
A__ : Tuple =classifier_dropout
A__ : int =layer_norm_epsilon
A__ : int =initializer_range
A__ : str =use_cache
A__ : Tuple =bos_token_id
A__ : int =eos_token_id
A__ : int =attention_types
A__ : Any =self.expand_attention_types_params(UpperCamelCase__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
@staticmethod
def _UpperCAmelCase ( UpperCamelCase__ : List[str] ):
A__ : Optional[Any] =[]
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
"""simple docstring"""
import torch
A__ : List[str] =input.size()
A__ : Dict =len(UpperCamelCase )
A__ : Optional[int] =shape[dimension]
A__ : str =torch.arange(0 , UpperCamelCase , UpperCamelCase )
A__ : Optional[int] =torch.div(sizedim - size , UpperCamelCase , rounding_mode="floor" ) + 1
A__ : str =torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None]
A__ : Tuple =[slice(UpperCamelCase )] * rank
A__ : int =indices
A__ : Optional[int] =input[s]
A__ : Union[str, Any] =list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase )
def lowercase ( UpperCamelCase : str , UpperCamelCase : Any ):
"""simple docstring"""
import torch
A__ : List[str] =torch.arange(1 , UpperCamelCase )
A__ : List[Any] =torch.remainder(UpperCamelCase , UpperCamelCase )
A__ : Optional[int] =remainders == 0
A__ : str =candidates[divisor_indices]
A__ : int =torch.max(UpperCamelCase )
return largest_divisor, torch.div(UpperCamelCase , UpperCamelCase , rounding_mode="floor" )
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
@property
def _UpperCAmelCase ( self : List[Any] ):
A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"}
else:
A__ : Tuple ={0: "batch", 1: "sequence"}
return common_inputs
@property
def _UpperCAmelCase ( self : List[str] ):
return self._config.num_heads
def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
# We need to order the input in the way they appears in the forward()
A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A__ : Union[str, Any] =seqlen + 2
A__ : List[Any] =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A__ : Optional[Any] =[
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers )
]
A__ : Optional[Any] =common_inputs["attention_mask"]
if self.use_past:
A__ : Any =ordered_inputs["attention_mask"].dtype
A__ : Tuple =torch.cat(
[ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self : List[str] ):
return 13
| 656 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A : Optional[Any] = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : Tuple = """megatron-bert"""
def __init__( self : Tuple , UpperCamelCase__ : Dict=29056 , UpperCamelCase__ : int=1024 , UpperCamelCase__ : Optional[int]=24 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : int=4096 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : Tuple , ):
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[int] =vocab_size
A__ : Optional[int] =hidden_size
A__ : str =num_hidden_layers
A__ : Any =num_attention_heads
A__ : str =hidden_act
A__ : Optional[int] =intermediate_size
A__ : str =hidden_dropout_prob
A__ : str =attention_probs_dropout_prob
A__ : List[Any] =max_position_embeddings
A__ : List[Any] =type_vocab_size
A__ : Tuple =initializer_range
A__ : Any =layer_norm_eps
A__ : Any =position_embedding_type
A__ : Union[str, Any] =use_cache
| 656 | 1 |
"""simple docstring"""
__A : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def lowercase ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : float ):
"""simple docstring"""
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
A__ : List[Any] =(
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(UpperCamelCase )}'''
)
raise ValueError(UpperCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | """simple docstring"""
from __future__ import annotations
def lowercase ( UpperCamelCase : list[float] ):
"""simple docstring"""
if len(UpperCamelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
A__ : Union[str, Any] =nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | 1 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def lowercase ( UpperCamelCase : Tuple ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def lowercase ( UpperCamelCase : Union[str, Any] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__magic_name__ : int
__magic_name__ : str
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def _UpperCAmelCase ( self : Tuple ):
A__ : Tuple ={}
A__ : Dict =[]
A__ : Any =1
A__ : Dict =[1, 2]
A__ : List[str] ={"a": 1, "b": 2}
A__ : int ={"a": [1, 2], "b": [3, 4]}
A__ : int ={"a": {"1": 1}, "b": 2}
A__ : Any ={"a": 1, "b": 2, "c": 3, "d": 4}
A__ : Optional[Any] ={}
A__ : Optional[int] =[]
A__ : Optional[Any] =2
A__ : List[Any] =[2, 3]
A__ : Optional[Any] ={"a": 2, "b": 3}
A__ : Optional[int] ={"a": [2, 3], "b": [4, 5]}
A__ : Optional[Any] ={"a": {"1": 2}, "b": 3}
A__ : List[str] ={"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
A__ : int =2
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
A__ : Optional[Any] ={"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A__ : Optional[int] ={"a": 2, "b": 0, "c": 2}
A__ : int ={
"a": np.eye(2 ).astype(UpperCamelCase__ ),
"b": np.zeros(3 ).astype(UpperCamelCase__ ),
"c": np.ones(2 ).astype(UpperCamelCase__ ),
}
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , map_numpy=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCamelCase__ , UpperCamelCase__ , map_numpy=UpperCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCamelCase__ , UpperCamelCase__ , map_numpy=UpperCamelCase__ , num_proc=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCamelCase__ , UpperCamelCase__ , map_numpy=UpperCamelCase__ , num_proc=UpperCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCamelCase__ ): # can't pickle a local lambda
map_nested(lambda UpperCamelCase__ : x + 1 , UpperCamelCase__ , num_proc=UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] ):
A__ : str ={"a": 1, "b": 2}
A__ : Optional[Any] ={"a": 3, "b": 4}
A__ : Dict ={"a": 5, "b": 6}
A__ : Optional[Any] =sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) , UpperCamelCase__ )
def _UpperCAmelCase ( self : Any ):
class __lowerCAmelCase :
'''simple docstring'''
__magic_name__ : List[Any] = """bar"""
A__ : str =Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCamelCase__ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A__ : Dict ={F'''{i}''': i for i in range(UpperCamelCase )}
A__ : Union[str, Any] =map_nested(lambda UpperCamelCase : x + 10 , UpperCamelCase , num_proc=UpperCamelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
@require_tf
def _UpperCAmelCase ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A__ : Optional[Any] =layers.Dense(2 )
def gen_random_output():
A__ : Optional[Any] =tf.random.uniform((1, 3) )
return model(UpperCamelCase__ ).numpy()
with temp_seed(42 , set_tensorflow=UpperCamelCase__ ):
A__ : Dict =gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCamelCase__ ):
A__ : Optional[int] =gen_random_output()
A__ : Union[str, Any] =gen_random_output()
np.testing.assert_equal(UpperCamelCase__ , UpperCamelCase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def _UpperCAmelCase ( self : Optional[Any] ):
import torch
def gen_random_output():
A__ : Optional[Any] =torch.nn.Linear(3 , 2 )
A__ : str =torch.rand(1 , 3 )
return model(UpperCamelCase__ ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCamelCase__ ):
A__ : Optional[Any] =gen_random_output()
with temp_seed(42 , set_pytorch=UpperCamelCase__ ):
A__ : str =gen_random_output()
A__ : Union[str, Any] =gen_random_output()
np.testing.assert_equal(UpperCamelCase__ , UpperCamelCase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def _UpperCAmelCase ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A__ : int =gen_random_output()
with temp_seed(42 ):
A__ : Optional[int] =gen_random_output()
A__ : Optional[int] =gen_random_output()
np.testing.assert_equal(UpperCamelCase__ , UpperCamelCase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : str =NestedDataStructure(UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : str ):
"""simple docstring"""
A__ : Tuple =NestedDataStructure(UpperCamelCase ).flatten()
assert output == expected_output
def lowercase ( ):
"""simple docstring"""
A__ : Dict =A(x=1 , y="foobar" )
A__ : Dict ={"x": 1, "y": "foobar"}
assert asdict(UpperCamelCase ) == expected_output
A__ : Dict ={"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
A__ : str ={"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(UpperCamelCase ) == expected_output
with pytest.raises(UpperCamelCase ):
asdict([1, A(x=10 , y="foo" )] )
def lowercase ( UpperCamelCase : str ):
"""simple docstring"""
return text.split()
def lowercase ( UpperCamelCase : str ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def lowercase ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A__ : Optional[int] =list(iflatmap_unordered(UpperCamelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A__ : Union[str, Any] =list(iflatmap_unordered(UpperCamelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A__ : List[Any] =[]
for yield_time, content in iflatmap_unordered(
UpperCamelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(UpperCamelCase ) == 4
| 656 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A : Optional[Any] = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
def lowercase ( UpperCamelCase : int = 1000000 ):
"""simple docstring"""
A__ : Optional[Any] =set(range(3 , UpperCamelCase , 2 ) )
primes.add(2 )
for p in range(3 , UpperCamelCase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , UpperCamelCase , UpperCamelCase ) ) )
A__ : int =[float(UpperCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(UpperCamelCase , limit + 1 , UpperCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 656 | """simple docstring"""
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
if num <= 0:
raise ValueError("Input must be a positive integer" )
A__ : Union[str, Any] =[True] * (num + 1)
A__ : Union[str, Any] =2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , UpperCamelCase ):
A__ : str =False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[int] = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 656 | 1 |
"""simple docstring"""
def lowercase ( UpperCamelCase : str , UpperCamelCase : str ):
"""simple docstring"""
A__ : str =len(UpperCamelCase )
A__ : Optional[int] =len(UpperCamelCase )
A__ : Dict =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
A__ : Dict =True
for i in range(UpperCamelCase ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A__ : str =True
if a[i].islower():
A__ : str =True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | """simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
def _UpperCAmelCase ( self : List[Any] ):
A__ : Tuple =torch.nn.Linear(10 , 10 )
A__ : List[str] =torch.optim.SGD(model.parameters() , 0.1 )
A__ : Union[str, Any] =Accelerator()
A__ : str =accelerator.prepare(UpperCamelCase__ )
try:
pickle.loads(pickle.dumps(UpperCamelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 656 | 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
'''simple docstring'''
__magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
super().__init__()
A__ : Dict =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
F''' `n_embd`: {n_embd} are not equal.''' )
A__ : Optional[int] =prefix_inner_dim
A__ : Optional[int] =prefix_hidden_dim
A__ : Optional[int] =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A__ : Optional[int] =(
nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A__ : str =GPTaConfig(
vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ , activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , )
A__ : Any =GPTaLMHeadModel(UpperCamelCase__ )
def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
A__ : int =self.transformer.transformer.wte(UpperCamelCase__ )
A__ : Tuple =self.encode_prefix(UpperCamelCase__ )
A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ )
A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 )
A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ):
return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
return self.encode_prefix(UpperCamelCase__ )
@torch.no_grad()
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 )
A__ : List[str] =[]
A__ : Dict =[]
for feature in features:
A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature
# Only support beam search for now
A__ , A__ : Optional[Any] =self.generate_beam(
input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A__ : Optional[Any] =torch.stack(UpperCamelCase__ )
A__ : Optional[int] =torch.stack(UpperCamelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ):
A__ : str =eos_token_id
A__ : Optional[Any] =None
A__ : int =None
A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int )
A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool )
if input_embeds is not None:
A__ : Union[str, Any] =input_embeds
else:
A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ )
A__ : Tuple =outputs.logits
A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ : Optional[Any] =logits.softmax(-1 ).log()
if scores is None:
A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 )
A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] )
A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A__ : str =next_tokens
else:
A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] )
A__ : str =torch.cat((tokens, next_tokens) , dim=1 )
else:
A__ : Union[str, Any] =-float(np.inf )
A__ : Dict =0
A__ : Optional[Any] =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ : Optional[Any] =scores_sum / seq_lengths[:, None]
A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 )
A__ : Tuple =next_tokens // scores_sum.shape[1]
A__ : List[Any] =seq_lengths[next_tokens_source]
A__ : int =next_tokens % scores_sum.shape[1]
A__ : str =next_tokens.unsqueeze(1 )
A__ : List[Any] =tokens[next_tokens_source]
A__ : int =torch.cat((tokens, next_tokens) , dim=1 )
A__ : List[str] =generated[next_tokens_source]
A__ : Optional[Any] =scores_sum_average * seq_lengths
A__ : Optional[int] =is_stopped[next_tokens_source]
A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A__ : str =torch.cat((generated, next_token_embed) , dim=1 )
A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze()
if is_stopped.all():
break
A__ : Optional[int] =scores / seq_lengths
A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ )
# tokens tensors are already padded to max_seq_length
A__ : int =[tokens[i] for i in order]
A__ : Any =torch.stack(UpperCamelCase__ , dim=0 )
A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 656 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (sentencepiece-backed) tokenizer is only importable when the optional
# `sentencepiece` dependency is installed; otherwise record None so that
# slow-tokenizer resolution degrades gracefully.
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# File names used when saving/loading the vocabulary and fast-tokenizer state.
# NOTE: originally every constant below was bound to the same mangled name
# `__A`, but the class in this module reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# `logger`, so those names are restored here.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Pretrained checkpoint name -> remote URL for each tokenizer file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class __lowerCAmelCase ( _UpperCamelCase):
    """Fast BigBird tokenizer backed by the HuggingFace *tokenizers* library.

    Provides special-token bookkeeping ([CLS] X [SEP] / [CLS] A [SEP] B [SEP]),
    special-token masks, token-type ids, and export of the slow sentencepiece
    vocabulary.

    NOTE(review): the mangled original bound every class attribute to
    `__magic_name__` (only the last assignment survived) and gave every method
    duplicate `UpperCamelCase__` parameter names (a SyntaxError); the attribute
    and parameter names below are reconstructed from the values each body reads.
    """

    # Attributes read by the PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain-string specials as AddedToken so whitespace handling
        # around them is explicit and consistent.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # The slow vocabulary can only be re-exported when the original
        # sentencepiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `[CLS] X [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: all 0 for a single sequence, 0s then 1s for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 656 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Checkpoint keys whose tensors are replicated across tensor-parallel ranks:
# during merging they are summed and then divided by `pretraining_tp`.
# (Originally bound to the mangled name `__A`, but the conversion function
# below reads WEIGHTS_TO_AVERAGE_ENDSWITH / WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN.)
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

# Keys whose Megatron layers are RowParallelLinear: their TP shards are
# concatenated along dim 1 (all other parallel weights concatenate along dim 0).
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def layer_name_mapping(key: str, file: str) -> str:
    """Translate a Megatron-DeepSpeed checkpoint key into a BLOOM `transformers` key.

    Non-transformer-block keys are renamed via a fixed table; every other key is
    prefixed with ``h.<block index>.`` where the block index is the layer number
    parsed from the shard *file* name minus 3 (the first Megatron layers hold the
    embedding/pre-block parameters).

    Note: the original mangled signature used the same name for both parameters
    (a SyntaxError) and the body read undefined names; `key`/`file` are restored
    from the body's usage.
    """
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: layer number comes from the shard file name.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the size in bytes of one element of `dtype`.

    torch.bool counts as 1/8 byte (bit-packed for size accounting); for every
    other dtype the trailing bit width in its name (e.g. "torch.float32" -> 32)
    is parsed and converted to whole bytes.

    Raises:
        ValueError: if the dtype name carries no trailing bit width.
    """
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    """Merge Megatron-DeepSpeed BLOOM shard files and save a `transformers` checkpoint.

    Args:
        bloom_checkpoint_path: directory holding the Megatron ``layer_*`` shard files.
        bloom_config_file: optional JSON config path; an empty string selects defaults.
        pytorch_dump_folder_path: output directory for the converted checkpoint.
        shard_model: when True, write one output shard per Megatron layer file plus
            a ``weight_map`` index file instead of one monolithic state dict.
        pretraining_tp: tensor-parallel degree used at pretraining time, i.e. the
            number of ``model_0{i}`` files per layer to merge.

    Note: the mangled original named all five parameters identically (a
    SyntaxError) and dropped the `layer_name_mapping` key rewrite; both are
    restored here from the body's variable usage.
    """
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point for the BLOOM Megatron -> transformers conversion.
    # (The mangled original bound the parser and parsed args to `__A` while
    # reading `parser`/`args`, a NameError; the proper names are restored.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 656 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module logger and vocabulary-file constants.
# (Originally all bound to `__A`, but the tokenizer class below reads `logger`
# and `VOCAB_FILES_NAMES`, so those names are restored.)
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Pretrained checkpoint name -> remote sentencepiece vocabulary URL.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase ( _UpperCamelCase):
    '''simple docstring'''

    # CPM tokenizer: sentencepiece-based XLNet-style tokenizer with a jieba
    # pre-segmentation step for Chinese text.
    # NOTE(review): the mangled signatures below repeat the parameter name
    # `UpperCamelCase__` (duplicate parameters) and every method shares the name
    # `_UpperCAmelCase`, so only the last definition would survive on the class;
    # the intended distinct names need restoring before this can run — verify
    # against the upstream tokenizer this was derived from.
    def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
        # Mask token is wrapped as an AddedToken (lstrip/rstrip flags are the
        # mangled parameter — presumably True/False originally; verify).
        A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        # Offset between sentencepiece ids and model vocabulary ids.
        A__ : Dict =3
        A__ : int =do_lower_case
        A__ : str =remove_space
        A__ : Optional[Any] =keep_accents
        A__ : int =vocab_file
        # Load the sentencepiece model from the vocab file.
        A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCamelCase__ )
        # jieba is an optional dependency needed for Chinese word segmentation.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        A__ : Union[str, Any] =jieba
        # Maps space/newline to the placeholder glyphs used inside the vocab.
        A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Vocabulary size equals the sentencepiece model size.
        return len(self.sp_model )

    def _UpperCAmelCase ( self : Optional[int] ):
        # Full token -> id mapping, including tokens added after training.
        A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : List[str] ):
        # The SentencePieceProcessor is not picklable; drop it before pickling.
        A__ : Union[str, Any] =self.__dict__.copy()
        A__ : Tuple =None
        return state

    def __setstate__( self : Tuple , UpperCamelCase__ : int ):
        A__ : Union[str, Any] =d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            A__ : Optional[int] ={}
        # Recreate the sentencepiece processor dropped in __getstate__.
        A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ):
        # Text normalization: whitespace collapsing, quote normalization,
        # optional accent stripping and lower-casing.
        if self.remove_space:
            A__ : Optional[int] =" ".join(inputs.strip().split() )
        else:
            A__ : Optional[Any] =inputs
        A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ )
            A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
        if self.do_lower_case:
            A__ : str =outputs.lower()
        return outputs

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
        # Tokenize with sentencepiece, then re-split pieces that end in a digit
        # followed by a comma so numbers tokenize consistently.
        A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ )
        A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
        A__ : List[str] =[]
        for piece in pieces:
            if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                A__ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        A__ : Union[str, Any] =cur_pieces[1:]
                    else:
                        A__ : List[str] =cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(UpperCamelCase__ )
            else:
                new_pieces.append(UpperCamelCase__ )
        return new_pieces

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
        # token -> id via the sentencepiece model.
        return self.sp_model.PieceToId(UpperCamelCase__ )

    def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
        # id -> token via the sentencepiece model.
        return self.sp_model.IdToPiece(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
        # Join pieces and map the sentencepiece underline back to spaces.
        A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
        return out_string

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # XLNet-style layout: specials go at the END — `X <sep> <cls>` or
        # `A <sep> B <sep> <cls>`.
        A__ : List[str] =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        # Mask marking special-token positions (1) vs sequence tokens (0).
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
        if token_ids_a is not None:
            return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
        return ([0] * len(UpperCamelCase__ )) + [1, 1]

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # Segment ids; the trailing <cls> gets its own segment id 2.
        A__ : List[str] =[self.sep_token_id]
        A__ : Optional[Any] =[2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        # Export the sentencepiece vocabulary: copy the original model file, or
        # serialize the in-memory model when the file is unavailable.
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : Tuple =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase__ , "wb" ) as fi:
                A__ : Optional[Any] =self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__ )
        return (out_vocab_file,)

    def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
        # Decode, then undo the space/newline placeholder glyph mapping.
        A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ )
        A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
| 656 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of scalar parameters in `model` that require gradients.

    Renamed from the mangled `lowercase` because the callback class below calls
    `count_trainable_parameters`, and the body read the undefined names
    `model`/`p`.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
# Module logger; bound as `logger` because the callback class below calls
# `logger.info(...)` (the mangled original bound it to `__A`, a NameError).
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the single best model for `metric`.

    Args:
        output_dir: directory where checkpoints are written.
        metric: one of "rouge2", "bleu", "em", "loss"; selects the filename
            template and the monitored quantity ``val_<metric>``.

    Raises:
        NotImplementedError: for any other metric name.

    Note: the mangled original repeated the parameter name (a SyntaxError) and
    read the undefined name `metric`; the two-parameter signature is restored.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",  # all supported metrics except loss are maximized; kept as original
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback watching ``val_<metric>``.

    Loss-style metrics (name containing "loss") are minimized; everything else
    is maximized. `patience` is the number of checks with no improvement before
    stopping. (Mangled original had duplicate parameter names, a SyntaxError.)
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class __lowerCAmelCase ( pl.Callback):
    '''simple docstring'''

    # Seq2seq training callback: logs learning rates, parameter counts, and
    # writes validation/test metrics (and generations) to disk.
    # NOTE(review): all hooks below share the mangled name `_UpperCAmelCase`
    # (only the last binding survives) and bodies read names like `pl_module`/
    # `trainer` that the mangled parameters no longer bind — verify against the
    # pl.Callback hook signatures this was derived from.
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ):
        # Log the per-parameter-group learning rates of the first optimizer.
        A__ : int ={F'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(UpperCamelCase__ )

    @rank_zero_only
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : pl.Trainer , UpperCamelCase__ : pl.LightningModule , UpperCamelCase__ : str , UpperCamelCase__ : Any=True ):
        # Write callback metrics (and optionally generated predictions) for the
        # given split (`type_path`) to files under the module's output_dir.
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        A__ : Optional[Any] =trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        A__ : Tuple =Path(pl_module.hparams.output_dir )
        if type_path == "test":
            A__ : Union[str, Any] =od / "test_results.txt"
            A__ : List[str] =od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            A__ : Optional[Any] =od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            A__ : List[Any] =od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=UpperCamelCase__ )
        generations_file.parent.mkdir(exist_ok=UpperCamelCase__ )
        with open(UpperCamelCase__ , "a+" ) as writer:
            for key in sorted(UpperCamelCase__ ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                A__ : List[str] =metrics[key]
                # Tensors are unwrapped to plain Python scalars before writing.
                if isinstance(UpperCamelCase__ , torch.Tensor ):
                    A__ : Any =val.item()
                A__ : Optional[int] =F'''{key}: {val:.6f}\n'''
                writer.write(UpperCamelCase__ )
        if not save_generations:
            return
        if "preds" in metrics:
            A__ : List[Any] ="\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(UpperCamelCase__ )

    @rank_zero_only
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ):
        # Log total and trainable parameter counts (in millions) at train start.
        try:
            A__ : Tuple =pl_module.model.model.num_parameters()
        except AttributeError:
            A__ : Union[str, Any] =pl_module.model.num_parameters()
        A__ : Any =count_trainable_parameters(UpperCamelCase__ )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )

    @rank_zero_only
    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : pl.Trainer , UpperCamelCase__ : pl.LightningModule ):
        # On test end: persist metrics json and write the test results file.
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(UpperCamelCase__ , UpperCamelCase__ , "test" )

    @rank_zero_only
    def _UpperCAmelCase ( self : int , UpperCamelCase__ : pl.Trainer , UpperCamelCase__ : str ):
        # On validation end: persist metrics json only.
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 656 | """simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations (with repetition) of `array` elements summing to `target`.

    Pure recursion, exponential time. `n` (the number of items in `array`) is
    unused here but kept for interface parity with the other implementations.
    Renamed from the mangled `lowercase` because the `__main__` block calls
    `combination_sum_iv`; the original signature repeated one parameter name
    three times (a SyntaxError).
    """

    def count_of_possible_combinations(target: int) -> int:
        # Overshot: this path contributes nothing.
        if target < 0:
            return 0
        # Exact hit: one valid (empty-remainder) combination.
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_with_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as `combination_sum_iv`, memoized top-down via a dp array.

    `dp_array[t]` caches the answer for subtarget `t` (-1 = not yet computed),
    giving O(target * len(array)) time. `n` is kept for interface parity.
    (Mangled original repeated the parameter name, a SyntaxError.)
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already memoized
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative bottom-up version of the ordered combination count.

    `n` is the number of usable items in `array` (the inner loop iterates
    `range(n)`). Runs in O(target * n) time and O(target) space.
    (Mangled original repeated the parameter name, a SyntaxError, and read the
    undefined names `target`/`array`.)
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: the empty combination
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: there are 9 ordered ways to form 5 from {1, 2, 5}.
    # (The mangled original bound these constants to `__A` while the call read
    # `n`, `array`, `target` — a NameError; proper names restored.)
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 656 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __lowerCAmelCase ( _UpperCamelCase):
    '''simple docstring'''

    # Processor combining an MCTCT feature extractor (audio) and a tokenizer
    # (text), with a deprecated target-context-manager compatibility path.
    # NOTE(review): several bodies below read `kwargs`/`args` although the
    # mangled parameters are `*UpperCamelCase__`/`**UpperCamelCase__` — the
    # intended names need restoring before this can run; verify upstream.
    __magic_name__ : Dict = """MCTCTFeatureExtractor"""
    __magic_name__ : str = """AutoTokenizer"""

    def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
        super().__init__(UpperCamelCase__ , UpperCamelCase__ )
        # Default to the feature extractor; switched to the tokenizer inside
        # the (deprecated) as_target_processor context.
        A__ : str =self.feature_extractor
        A__ : List[Any] =False

    def __call__( self : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ )
        # `raw_speech` is the legacy keyword for `audio`.
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            A__ : Optional[Any] =kwargs.pop("raw_speech" )
        else:
            A__ : List[Any] =kwargs.pop("audio" , UpperCamelCase__ )
        A__ : Optional[int] =kwargs.pop("sampling_rate" , UpperCamelCase__ )
        A__ : Optional[Any] =kwargs.pop("text" , UpperCamelCase__ )
        # First positional argument (if any) is treated as the audio input.
        if len(UpperCamelCase__ ) > 0:
            A__ : List[Any] =args[0]
            A__ : str =args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )

        if audio is not None:
            A__ : int =self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ )
        if text is not None:
            A__ : List[str] =self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ )
        # Return audio features, text encodings, or both (labels attached).
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            A__ : List[str] =encodings["input_ids"]
            return inputs

    def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[int] ):
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[str] ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*UpperCamelCase__ , **UpperCamelCase__ )
        A__ : str =kwargs.pop("input_features" , UpperCamelCase__ )
        A__ : int =kwargs.pop("labels" , UpperCamelCase__ )
        # First positional argument (if any) is treated as the input features.
        if len(UpperCamelCase__ ) > 0:
            A__ : Union[str, Any] =args[0]
            A__ : int =args[1:]

        if input_features is not None:
            A__ : Optional[int] =self.feature_extractor.pad(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
        if labels is not None:
            A__ : Optional[int] =self.tokenizer.pad(UpperCamelCase__ , **UpperCamelCase__ )
        # Return padded features, padded labels, or both (labels attached).
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            A__ : Union[str, Any] =labels["input_ids"]
            return input_features

    def _UpperCAmelCase ( self : Any , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ):
        # Delegate decoding to the tokenizer.
        return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )

    @contextmanager
    def _UpperCAmelCase ( self : Optional[Any] ):
        # Deprecated: temporarily routes __call__/pad through the tokenizer so
        # label text can be processed; prefer the `text` argument instead.
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        A__ : Dict =True
        A__ : List[Any] =self.tokenizer
        yield
        A__ : Optional[Any] =self.feature_extractor
        A__ : Optional[int] =False
| 656 | """simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit: x * Φ(x), via the erf formulation.

    Renamed from the mangled `lowercase` because this module references `_gelu`
    below (clipped variant and the TF<2.4 fallback); the body also read the
    undefined name `x` while the parameter was mangled.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh approximation of GELU (the "gelu_new"/GPT-2 variant).

    Renamed from the mangled `lowercase`: referenced as `_gelu_new` in the
    TF<2.4 fallback below; body read the undefined name `x`.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x)).

    Renamed from the mangled `lowercase` (the activation table below reads
    `mish`); body read the undefined name `x`.
    """
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast tanh-based GELU approximation.

    Renamed from the mangled `lowercase` (the activation table below reads
    `gelu_fast`); the two constants were bound to colliding mangled names.
    """
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)  # ~= sqrt(2 / pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x).

    Renamed from the mangled `lowercase` (the activation table below reads
    `quick_gelu`); body read the undefined names `x`/`coeff`.
    """
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_aa(x):
    """GELU with outputs clipped to [-10, 10] (quantization-friendly variant).

    Renamed from the mangled `lowercase` because the activation table below
    reads `gelu_aa`.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis`, gate the first half
    with the sigmoid of the second.

    The mangled original repeated the parameter name (a SyntaxError) and bound
    both split halves to the same variable; the activation table reads `glu`.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
# TF >= 2.4 ships a native (optionally approximate) GELU; use it and expose a
# wrapper matching the gelu_new contract. Older TF falls back to the manual
# implementations above. (The mangled original bound every alias to `__A`
# while the activation table reads `gelu`/`gelu_new`.)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        """Adapter exposing Keras' approximate GELU under the gelu_new contract."""
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
# Name -> TF activation callable; this is the table `get_tf_activation` below
# reads as `ACTaFN` (the mangled original bound it to `__A`, a NameError).
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase ( UpperCamelCase : List[Any] ):
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 656 | 1 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__A : Any = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__A : Optional[Any] = concatenate_datasets
__A : int = DownloadConfig
__A : int = DownloadManager
__A : Optional[Any] = DownloadMode
__A : int = DownloadConfig
__A : str = DownloadMode
__A : int = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 656 | """simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def _UpperCAmelCase ( self : Dict ):
A__ : Optional[Any] =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_encoder_blocks" ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=[2, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[8, 4, 2, 1] , UpperCamelCase__ : Tuple=[16, 32, 64, 128] , UpperCamelCase__ : Optional[int]=[1, 4, 8, 16] , UpperCamelCase__ : Any=[1, 2, 4, 8] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=None , ):
A__ : Tuple =parent
A__ : List[Any] =batch_size
A__ : List[Any] =image_size
A__ : Union[str, Any] =num_channels
A__ : Optional[int] =num_encoder_blocks
A__ : Any =sr_ratios
A__ : Any =depths
A__ : List[Any] =hidden_sizes
A__ : List[Any] =downsampling_rates
A__ : List[str] =num_attention_heads
A__ : int =is_training
A__ : List[Any] =use_labels
A__ : Any =hidden_act
A__ : Dict =hidden_dropout_prob
A__ : int =attention_probs_dropout_prob
A__ : List[Any] =initializer_range
A__ : Tuple =num_labels
A__ : List[Any] =scope
def _UpperCAmelCase ( self : Optional[int] ):
A__ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Any =None
if self.use_labels:
A__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A__ : List[Any] =self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : Tuple ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
A__ : Any =SegformerModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Dict =model(UpperCamelCase__ )
A__ : Optional[int] =self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
A__ : str =self.num_labels
A__ : Optional[Any] =SegformerForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] =model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A__ : List[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _UpperCAmelCase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
A__ : Tuple =1
A__ : Tuple =SegformerForSemanticSegmentation(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : List[str] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase__ )
A__ : Dict =model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertGreater(result.loss , 0.0 )
def _UpperCAmelCase ( self : str ):
A__ : Union[str, Any] =self.prepare_config_and_inputs()
A__ , A__ , A__ : Tuple =config_and_inputs
A__ : Tuple ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Dict = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__magic_name__ : Optional[int] = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__magic_name__ : Dict = True
__magic_name__ : List[str] = False
__magic_name__ : Optional[Any] = False
__magic_name__ : str = False
def _UpperCAmelCase ( self : Union[str, Any] ):
A__ : Union[str, Any] =SegformerModelTester(self )
A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ )
def _UpperCAmelCase ( self : str ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Dict ):
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _UpperCAmelCase ( self : Tuple ):
A__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] ):
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ )
@unittest.skip("SegFormer does not use inputs_embeds" )
def _UpperCAmelCase ( self : Dict ):
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def _UpperCAmelCase ( self : Tuple ):
pass
def _UpperCAmelCase ( self : List[str] ):
A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int =model_class(UpperCamelCase__ )
A__ : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] =[*signature.parameters.keys()]
A__ : List[str] =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _UpperCAmelCase ( self : str ):
A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
A__ : Union[str, Any] =True
for model_class in self.all_model_classes:
A__ : Optional[Any] =True
A__ : Union[str, Any] =False
A__ : str =True
A__ : Optional[int] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Any =outputs.attentions
A__ : List[str] =sum(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Dict =True
A__ : str =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first attentions (first block, first layer)
A__ : List[Any] =(self.model_tester.image_size // 4) ** 2
A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ : Tuple =(self.model_tester.image_size // 32) ** 2
A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A__ : int =len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : Optional[Any] =True
A__ : Any =True
A__ : Union[str, Any] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
A__ : Optional[Any] =outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first attentions (first block, first layer)
A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2
A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _UpperCAmelCase ( self : List[Any] ):
def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
A__ : Optional[Any] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] =outputs.hidden_states
A__ : int =self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Optional[Any] =True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : str =True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase ( self : Optional[int] ):
if not self.model_tester.is_training:
return
A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] =True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ):
continue
A__ : List[Any] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCAmelCase ( self : Tuple ):
pass
@slow
def _UpperCAmelCase ( self : Tuple ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowercase ( ):
"""simple docstring"""
A__ : List[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self : Tuple ):
# only resize + normalize
A__ : List[Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
UpperCamelCase__ )
A__ : Union[str, Any] =prepare_img()
A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ )
with torch.no_grad():
A__ : int =model(UpperCamelCase__ )
A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] =torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def _UpperCAmelCase ( self : Union[str, Any] ):
# only resize + normalize
A__ : Dict =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
A__ : int =SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(UpperCamelCase__ )
A__ : Tuple =prepare_img()
A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ )
with torch.no_grad():
A__ : int =model(UpperCamelCase__ )
A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : List[Any] =torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) )
@slow
def _UpperCAmelCase ( self : int ):
# only resize + normalize
A__ : Optional[Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
UpperCamelCase__ )
A__ : str =prepare_img()
A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ )
with torch.no_grad():
A__ : Dict =model(UpperCamelCase__ )
A__ : Any =outputs.logits.detach().cpu()
A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
A__ : List[str] =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
A__ : Tuple =torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 656 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self : Dict ):
A__ : int =XLMRobertaModel.from_pretrained("xlm-roberta-base" )
A__ : Tuple =torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
A__ : List[str] =torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
A__ : Dict =torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A__ : Tuple =model(UpperCamelCase__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase__ , atol=1E-3 ) )
@slow
def _UpperCAmelCase ( self : List[Any] ):
A__ : Optional[Any] =XLMRobertaModel.from_pretrained("xlm-roberta-large" )
A__ : List[Any] =torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
A__ : Optional[Any] =torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
A__ : str =torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A__ : str =model(UpperCamelCase__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase__ , atol=1E-3 ) )
| 656 | """simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ):
A__ : str =parent
A__ : List[str] =batch_size
A__ : Any =seq_length
A__ : List[str] =is_training
A__ : List[Any] =use_attention_mask
A__ : List[Any] =use_token_type_ids
A__ : Dict =use_labels
A__ : List[Any] =vocab_size
A__ : Optional[int] =hidden_size
A__ : Optional[Any] =num_hidden_layers
A__ : str =num_attention_heads
A__ : int =intermediate_size
A__ : Tuple =hidden_act
A__ : Tuple =hidden_dropout_prob
A__ : Dict =attention_probs_dropout_prob
A__ : Any =max_position_embeddings
A__ : Any =type_vocab_size
A__ : Union[str, Any] =type_sequence_label_size
A__ : Optional[Any] =initializer_range
A__ : int =num_choices
def _UpperCAmelCase ( self : Tuple ):
A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[str] =None
if self.use_attention_mask:
A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
A__ : str =None
if self.use_token_type_ids:
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Any =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self : Tuple ):
A__ : Dict =self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : str =config_and_inputs
A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _UpperCAmelCase ( self : int ):
A__ : str =self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs
A__ : Union[str, Any] =True
A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = True
__magic_name__ : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self : Optional[int] ):
A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self )
@slow
def _UpperCAmelCase ( self : List[Any] ):
for model_class_name in self.all_model_classes:
A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
A__ : Union[str, Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self : Tuple ):
A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
A__ : str =model(UpperCamelCase__ )[0]
A__ : List[Any] =[1, 11, 50265]
self.assertEqual(list(output.shape ) , UpperCamelCase__ )
# compare the actual values for a slice.
A__ : Any =np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def _UpperCAmelCase ( self : List[Any] ):
A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
A__ : Dict =model(UpperCamelCase__ )[0]
# compare the actual values for a slice.
A__ : Optional[Any] =np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 656 | 1 |
"""simple docstring"""
from functools import reduce
__A : Tuple = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowercase ( UpperCamelCase : str = N ):
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) )
for i in range(len(UpperCamelCase ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
__A : Any = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
__A : Optional[int] = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : Union[str, Any] =torch.load(UpperCamelCase , map_location="cpu" )
return sd
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int=rename_keys_prefix ):
"""simple docstring"""
A__ : List[str] =OrderedDict()
A__ : str =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
A__ : Optional[Any] =key
for name_pair in rename_keys_prefix:
A__ : int =new_key.replace(name_pair[0] , name_pair[1] )
A__ : Dict =d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
A__ : Optional[int] =new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : List[str] ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
A__ : Any ="pretraining"
if "vcr" in checkpoint_path:
A__ : Union[str, Any] ={"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
A__ : Optional[Any] ={"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
A__ : Optional[int] ={"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
A__ : List[str] ={"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
A__ : Optional[int] ={"visual_embedding_dim": 512}
A__ : List[str] ="multichoice"
elif "vqa_advanced" in checkpoint_path:
A__ : Any ={"visual_embedding_dim": 2048}
A__ : str ="vqa_advanced"
elif "vqa" in checkpoint_path:
A__ : Optional[int] ={"visual_embedding_dim": 2048, "num_labels": 3129}
A__ : str ="vqa"
elif "nlvr" in checkpoint_path:
A__ : str ={
"visual_embedding_dim": 1024,
"num_labels": 2,
}
A__ : Dict ="nlvr"
A__ : Union[str, Any] =VisualBertConfig(**UpperCamelCase )
# Load State Dict
A__ : int =load_state_dict(UpperCamelCase )
A__ : Tuple =get_new_dict(UpperCamelCase , UpperCamelCase )
if model_type == "pretraining":
A__ : str =VisualBertForPreTraining(UpperCamelCase )
elif model_type == "vqa":
A__ : Optional[int] =VisualBertForQuestionAnswering(UpperCamelCase )
elif model_type == "nlvr":
A__ : Union[str, Any] =VisualBertForVisualReasoning(UpperCamelCase )
elif model_type == "multichoice":
A__ : Union[str, Any] =VisualBertForMultipleChoice(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
# Save Checkpoints
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
__A : str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 656 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Any = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the list is bound to the obfuscated name ``__A`` but appended to
# below as ``rename_keys`` — presumably two mangled bindings of one list; verify.
__A : Any = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
    rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
    rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
    rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append(
        (
            f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
            f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
        )
    )
    rename_keys.append(
        (
            f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
            f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
        )
    )
    rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
    rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
    rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
    rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
    )
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
    )
    rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
    )
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
    )
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
    )
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
    )
    rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
    )
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
    )
    rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
    )
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
    )
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
    )
    rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
    rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
    rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
    )
    rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
    rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
    rename_keys.append(
        (f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
    )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def lowercase(state_dict, old_key, new_key):
    """Rename ``old_key`` to ``new_key`` in ``state_dict``, in place.

    NOTE(review): the original declared three identically named parameters
    (a SyntaxError) and discarded the popped value; this restores the
    obvious intent of a state-dict rename helper. Callers use positional
    arguments, so the signature stays compatible.
    """
    val = state_dict.pop(old_key)
    state_dict[new_key] = val
def lowercase(state_dict):
    """Return a new OrderedDict with backbone keys remapped to the HF layout.

    Every ``backbone.0.body`` prefix becomes ``backbone.conv_encoder.model``;
    all other keys are copied unchanged. Insertion order is preserved.

    NOTE(review): the original assigned every entry to a throwaway local and
    returned an undefined name; this restores the evident intent.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def lowercase(state_dict, is_panoptic=False):
    """Split the fused in-projection of each encoder self-attention layer.

    PyTorch's ``MultiheadAttention`` stores q/k/v as one 768x256 matrix and a
    768-long bias; the HF model expects three separate 256-row projections.
    Pops the fused tensors and writes ``q_proj``/``k_proj``/``v_proj`` entries
    into ``state_dict`` in place.

    NOTE(review): the original discarded every slice into a throwaway local;
    the target key names are restored from the standard DETR conversion
    layout. ``is_panoptic`` keeps its keyword name (the caller uses it).
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of the fused input projection layer
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def lowercase():
    """Download the standard COCO "two cats" verification image (network I/O).

    NOTE(review): the original bound the URL and the opened image to throwaway
    locals and returned an undefined name; this restores the evident intent.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : int ):
    """Convert a Conditional DETR checkpoint from torch hub to the HF format.

    Loads the original model, remaps its state dict, verifies the converted
    outputs match the originals, pushes to the Hub and saves model + image
    processor locally.
    NOTE(review): most locals are bound to the throwaway name ``A__`` while
    later lines read the intended names (``config``, ``state_dict``,
    ``model``, ...) — presumably mangled bindings; verify before running.
    """
    A__ : List[str] =ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        A__ : int ="resnet101"
    if "dc5" in model_name:
        A__ : str =True
    A__ : Any ="panoptic" in model_name
    if is_panoptic:
        A__ : List[Any] =250
    else:
        A__ : List[str] =91
        # map COCO detection label ids to human-readable names
        A__ : List[Any] ="huggingface/label-files"
        A__ : Dict ="coco-detection-id2label.json"
        A__ : Any =json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="dataset" ) , "r" ) )
        A__ : Optional[int] ={int(UpperCamelCase ): v for k, v in idalabel.items()}
        A__ : Optional[int] =idalabel
        A__ : Any ={v: k for k, v in idalabel.items()}
    # load image processor
    A__ : Optional[Any] ="coco_panoptic" if is_panoptic else "coco_detection"
    A__ : Dict =ConditionalDetrImageProcessor(format=UpperCamelCase )
    # prepare image
    A__ : Union[str, Any] =prepare_img()
    A__ : Optional[Any] =image_processor(images=UpperCamelCase , return_tensors="pt" )
    A__ : Optional[int] =encoding["pixel_values"]
    logger.info(F'''Converting model {model_name}...''' )
    # load original model from torch hub
    A__ : Dict =torch.hub.load("DeppMeng/ConditionalDETR" , UpperCamelCase , pretrained=UpperCamelCase ).eval()
    A__ : List[Any] =conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            A__ : Dict ="conditional_detr." + src
        rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
    A__ : Union[str, Any] =rename_backbone_keys(UpperCamelCase )
    # query, key and value matrices need special treatment
    read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    A__ : List[Any] ="conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                A__ : List[str] =state_dict.pop(UpperCamelCase )
                A__ : str =val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                A__ : Optional[Any] =state_dict.pop(UpperCamelCase )
                A__ : Tuple =val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                # segmentation-head weights keep their original names
                continue
            else:
                A__ : Optional[int] =state_dict.pop(UpperCamelCase )
                A__ : List[Any] =val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                A__ : List[str] =state_dict.pop(UpperCamelCase )
                A__ : Any =val
    # finally, create HuggingFace model and load state dict
    A__ : List[str] =ConditionalDetrForSegmentation(UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase )
    model.load_state_dict(UpperCamelCase )
    model.eval()
    model.push_to_hub(repo_id=UpperCamelCase , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    A__ : int =conditional_detr(UpperCamelCase )
    A__ : str =model(UpperCamelCase )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
    model.save_pretrained(UpperCamelCase )
    image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: pick a Conditional DETR variant and a dump folder.
    # NOTE(review): parser/args are bound to the obfuscated name ``__A`` but
    # read below as ``parser``/``args`` — presumably mangled bindings; verify.
    __A : List[str] = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    __A : Union[str, Any] = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 656 | """simple docstring"""
# Pre-computed fifth power of every decimal digit, keyed by its character.
__A = {str(digit): digit**5 for digit in range(10)}
# Readable alias: the helper below reads ``DIGITS_FIFTH_POWER``, which was
# previously undefined because the table was bound only to ``__A``.
DIGITS_FIFTH_POWER = __A


def lowercase(UpperCamelCase: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of the input."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase))
def lowercase(limit: int = 1000000) -> int:
    """Project Euler 30: sum every number in ``[1000, limit)`` that equals
    the sum of the fifth powers of its digits.

    ``limit`` generalizes the previously hard-coded upper bound of 1,000,000
    (the default preserves the original behaviour). The digit-power sum is
    computed inline because the original called an undefined helper name.
    """
    return sum(
        number
        for number in range(1000, limit)
        if number == sum(int(digit) ** 5 for digit in str(number))
    )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is undefined in this module — the solver above
    # was renamed to ``lowercase`` by the obfuscation; verify before running.
    print(solution())
| 656 | 1 |
"""simple docstring"""
__A = 256
# Radix of the rolling hash (one slot per possible byte value).
ALPHABET_SIZE = 256
# Modulus to hash a string
__A = 1_000_003
# Large prime modulus keeping hash values bounded.
MODULUS = 1_000_003


def lowercase(pattern: str, text: str) -> bool:
    """Rabin-Karp substring search: True iff ``pattern`` occurs in ``text``.

    Uses a rolling polynomial hash; candidate positions whose hash matches
    are confirmed with a direct string comparison, so there are no false
    positives. An empty pattern matches any text.

    NOTE(review): the original bound every local to a throwaway name and read
    undefined module constants; names are restored, behaviour is the
    textbook algorithm the code clearly implemented.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Hash the pattern and the first window of the text; also build
    # ALPHABET_SIZE**(p_len-1) % MODULUS for removing the leading character.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * ALPHABET_SIZE) % MODULUS
        text_hash = (ord(text[i]) + text_hash * ALPHABET_SIZE) % MODULUS
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * ALPHABET_SIZE) % MODULUS

    for i in range(t_len - p_len + 1):
        # Hash match is only a candidate; confirm with a direct comparison.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Roll the window: drop text[i], append text[i + p_len].
        # https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * ALPHABET_SIZE
            + ord(text[i + p_len])
        ) % MODULUS
    return False
def lowercase ( ):
    """Self-test for the Rabin-Karp matcher above.

    NOTE(review): the assertions call ``rabin_karp`` with ``UpperCamelCase``
    arguments — both undefined here; the matcher was renamed ``lowercase`` and
    the fixtures are bound to throwaway locals. Presumably all mangled
    bindings of the pattern/text pairs visible below; verify before running.
    """
    A__ : Dict ="abc1abc12"
    A__ : Dict ="alskfjaldsabc1abc1abc12k23adsfabcabc"
    A__ : List[Any] ="alskfjaldsk23adsfabcabc"
    assert rabin_karp(UpperCamelCase , UpperCamelCase ) and not rabin_karp(UpperCamelCase , UpperCamelCase )
    # Test 2)
    A__ : Optional[Any] ="ABABX"
    A__ : Union[str, Any] ="ABABZABABYABABX"
    assert rabin_karp(UpperCamelCase , UpperCamelCase )
    # Test 3)
    A__ : Any ="AAAB"
    A__ : Union[str, Any] ="ABAAAAAB"
    assert rabin_karp(UpperCamelCase , UpperCamelCase )
    # Test 4)
    A__ : Dict ="abcdabcy"
    A__ : Dict ="abcxabcdabxabcdabcdabcy"
    assert rabin_karp(UpperCamelCase , UpperCamelCase )
    # Test 5) non-ASCII input
    A__ : List[str] ="Lü"
    A__ : Dict ="Lüsai"
    assert rabin_karp(UpperCamelCase , UpperCamelCase )
    A__ : Optional[int] ="Lue"
    assert not rabin_karp(UpperCamelCase , UpperCamelCase )
    print("Success." )
if __name__ == "__main__":
    # NOTE(review): ``test_rabin_karp`` is undefined in this module — the test
    # routine above was renamed ``lowercase`` by the obfuscation; verify.
    test_rabin_karp()
| 656 | """simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# NOTE(review): every module constant below is bound to the same obfuscated
# name ``__A``, while the docstring decorators later read names such as
# ``_CONFIG_FOR_DOC`` and ``_CHECKPOINT_FOR_DOC`` — presumably mangled
# bindings of those constants; verify.
__A : Optional[Any] = logging.get_logger(__name__)
# General docstring
__A : str = "PoolFormerConfig"
# Base docstring
__A : Optional[Any] = "sail/poolformer_s12"
__A : List[Any] = [1, 512, 7, 7]
# Image classification docstring
__A : List[str] = "sail/poolformer_s12"
__A : Tuple = "tabby, tabby cat"
__A : Tuple = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowercase(input, drop_prob: float = 0.0, training: bool = False):
    """Per-sample stochastic depth (drop-path).

    Zeroes whole samples of ``input`` with probability ``drop_prob`` during
    training and rescales the survivors by ``1 / (1 - drop_prob)`` so the
    expectation is unchanged. A no-op (returns ``input`` unchanged) when
    ``drop_prob`` is 0 or when not training.

    NOTE(review): the original declared three identically named parameters
    (a SyntaxError) and bound each step to a throwaway local; this restores
    the standard implementation the code clearly mirrored.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over the remaining dims.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class __lowerCAmelCase(nn.Module):
    """``nn.Module`` wrapper around the stochastic-depth ``drop_path`` helper.

    NOTE(review): the original dropped the probability into a throwaway local
    and gave both methods the same mangled name; ``self.drop_prob`` and the
    ``forward``/``extra_repr`` hooks are restored, with the original name kept
    as an alias (the last binding, matching the original class).
    """

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # ``drop_path`` is the module-level stochastic-depth helper.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)

    # Backward-compatible alias for the original (obfuscated) method name.
    _UpperCAmelCase = extra_repr


# Readable public alias; other classes in this file refer to this name.
PoolFormerDropPath = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    """Patch embeddings: a strided Conv2d projection plus optional norm.

    NOTE(review): the original declared six identically named parameters
    (a SyntaxError), used the mangled ``nn.Convad`` and lost the ``self.``
    bindings; parameter names are restored from the keyword call site in the
    encoder below (``patch_size=``, ``stride=``, ``padding=``,
    ``num_channels=``, ``hidden_size=``).
    """

    def __init__(self, patch_size, stride, padding, num_channels, hidden_size, norm_layer=None):
        super().__init__()
        # Accept either a scalar or an iterable for each spatial argument.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings

    # Backward-compatible alias for the original (obfuscated) method name.
    _UpperCAmelCase = forward


# Readable public alias; the encoder below refers to this name.
PoolFormerEmbeddings = __lowerCAmelCase
class __lowerCAmelCase(nn.GroupNorm):
    """GroupNorm with a single group (normalizes over all channels at once).

    NOTE(review): the original declared two identically named parameters
    (a SyntaxError); restored to ``num_channels`` plus pass-through kwargs.
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


# Readable public alias; other classes in this file refer to this name.
PoolFormerGroupNorm = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    """PoolFormer token mixer: average pooling minus the identity.

    NOTE(review): the original used the mangled ``nn.AvgPoolad`` and passed an
    undefined name as ``count_include_pad``; restored to ``nn.AvgPool2d`` with
    ``count_include_pad=False`` (the standard PoolFormer setting, which keeps
    border averages correct).
    """

    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input makes this a residual-style mixing operator.
        return self.pool(hidden_states) - hidden_states

    # Backward-compatible alias for the original (obfuscated) method name.
    _UpperCAmelCase = forward


# Readable public alias; the layer class below refers to this name.
PoolFormerPooling = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    """PoolFormer MLP block built from 1x1 convolutions with drop-path.

    NOTE(review): the original used the mangled ``nn.Convad`` for both 1x1
    convolutions and lost every ``self.`` binding; the forward pass below
    (conv -> activation -> drop -> conv -> drop) mirrors the original's
    attribute reads exactly.
    """

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # ``hidden_act`` may be a string key into the activation table or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states

    # Backward-compatible alias for the original (obfuscated) method name.
    _UpperCAmelCase = forward


# Readable public alias; the layer class below refers to this name.
PoolFormerOutput = __lowerCAmelCase
class __lowerCAmelCase ( nn.Module):
    """One PoolFormer layer: pooling mixer + conv-MLP, each behind a norm,
    a residual connection, drop-path and (optionally) learned layer scaling.

    NOTE(review): every submodule/parameter below is bound to a throwaway
    local (``A__``) while the forward pass reads ``self.pooling``,
    ``self.output``, ``self.before_norm`` etc. — presumably mangled ``self.``
    assignments; the six identically named ``__init__`` parameters appear to
    be (config, num_channels, pool_size, hidden_size, intermediate_size,
    drop_path) per the keyword call site in the encoder. Verify.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
        super().__init__()
        A__ : Optional[int] =PoolFormerPooling(UpperCamelCase__ )
        A__ : List[str] =PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        A__ : int =PoolFormerGroupNorm(UpperCamelCase__ )
        A__ : int =PoolFormerGroupNorm(UpperCamelCase__ )
        # Useful for training neural nets
        A__ : Tuple =PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity()
        A__ : Optional[Any] =config.use_layer_scale
        if config.use_layer_scale:
            # per-channel learned scales applied to each residual branch
            A__ : List[str] =nn.Parameter(
                config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )
            A__ : List[Any] =nn.Parameter(
                config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
        # Returns a 1-tuple ``(output,)`` in both branches.
        if self.use_layer_scale:
            A__ : Optional[int] =self.pooling(self.before_norm(UpperCamelCase__ ) )
            A__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            A__ : Union[str, Any] =hidden_states + self.drop_path(UpperCamelCase__ )
            A__ : Tuple =()
            A__ : List[str] =self.output(self.after_norm(UpperCamelCase__ ) )
            A__ : Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            A__ : str =hidden_states + self.drop_path(UpperCamelCase__ )
            A__ : List[Any] =(output,) + outputs
            return outputs
        else:
            A__ : Tuple =self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) )
            # First residual connection
            A__ : Optional[Any] =pooling_output + hidden_states
            A__ : Tuple =()
            # Second residual connection inside the PoolFormerOutput block
            A__ : List[str] =self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) )
            A__ : Any =hidden_states + layer_output
            A__ : Tuple =(output,) + outputs
            return outputs
class __lowerCAmelCase ( nn.Module):
    """PoolFormer encoder: per-stage patch embeddings followed by stacks of
    PoolFormer layers, with a linearly increasing stochastic-depth rate.

    NOTE(review): module lists are built in throwaway locals (``A__``) while
    the loop and the forward pass read ``embeddings``, ``blocks``,
    ``self.patch_embeddings``, ``self.block`` — presumably mangled bindings;
    verify.
    """

    def __init__( self : Dict , UpperCamelCase__ : List[str] ):
        super().__init__()
        A__ : Tuple =config
        # stochastic depth decay rule
        A__ : Dict =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        A__ : Tuple =[]
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        A__ : List[str] =nn.ModuleList(UpperCamelCase__ )
        # Transformer blocks
        A__ : Union[str, Any] =[]
        A__ : Any =0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            A__ : Union[str, Any] =[]
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(UpperCamelCase__ ) )
        A__ : str =nn.ModuleList(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True ):
        # Runs each stage (embedding + its layer stack), optionally collecting
        # per-stage hidden states; returns a tuple or a model-output dataclass.
        A__ : Union[str, Any] =() if output_hidden_states else None
        A__ : Dict =pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            A__ , A__ : List[Any] =layers
            # Get patch embeddings from hidden_states
            A__ : Any =embedding_layer(UpperCamelCase__ )
            # Send the embeddings through the blocks
            for _, blk in enumerate(UpperCamelCase__ ):
                A__ : List[str] =blk(UpperCamelCase__ )
                A__ : Tuple =layer_outputs[0]
            if output_hidden_states:
                A__ : List[Any] =all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ )
class __lowerCAmelCase ( _UpperCamelCase):
    """Base class handling weight initialization for PoolFormer models.

    NOTE(review): the class attributes are bound to the obfuscated name
    ``__magic_name__`` — presumably the mangled ``config_class`` /
    ``base_model_prefix`` / ``main_input_name`` / gradient-checkpointing
    attributes of a ``PreTrainedModel`` subclass; verify.
    """

    __magic_name__ : List[str] = PoolFormerConfig
    __magic_name__ : int = """poolformer"""
    __magic_name__ : Any = """pixel_values"""
    __magic_name__ : Any = True

    def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : str ):
        # Initialize linear/conv weights from a truncated normal, zero the
        # biases, and set LayerNorm to identity scale.
        # NOTE(review): ``nn.Convad`` looks like a mangled ``nn.Conv2d``;
        # the argument is read as ``module`` below — verify.
        if isinstance(UpperCamelCase__ , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(UpperCamelCase__ , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )

    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=False ):
        # Gradient-checkpointing toggle: stores the flag on encoder modules.
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            A__ : Optional[Any] =value
# Docstring fragments consumed by the decorators below.
# NOTE(review): both are bound to the obfuscated name ``__A`` while the
# decorators read other names — presumably mangled START_DOCSTRING/
# INPUTS_DOCSTRING constants; verify.
__A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    """Bare PoolFormer model: an encoder producing raw hidden states.

    NOTE(review): the encoder is bound to a throwaway local (``A__``) but the
    forward pass reads ``self.encoder`` — presumably a mangled ``self.``
    assignment; verify.
    """

    def __init__( self : List[str] , UpperCamelCase__ : Dict ):
        super().__init__(UpperCamelCase__ )
        A__ : List[Any] =config
        A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ )
        # Initialize weights and apply final processing
        self.post_init()

    def _UpperCAmelCase ( self : Tuple ):
        # Accessor for the stage-wise patch embedding modules.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        # Resolve output flags from config defaults, require pixel_values,
        # run the encoder and repackage its outputs.
        A__ : int =(
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        A__ : List[Any] =self.encoder(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : int =encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , )
class __lowerCAmelCase(nn.Module):
    """Final pooler: a single dense projection over the hidden states.

    NOTE(review): the original bound the linear layer to a throwaway local
    while ``forward`` read ``self.dense``; the binding is restored.
    """

    def __init__(self, config):
        super().__init__()
        # hidden_size -> hidden_size projection, as in the original call.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output

    # Backward-compatible alias for the original (obfuscated) method name.
    _UpperCAmelCase = forward


# Readable public alias for the (otherwise shadowed) class name.
PoolFormerFinalPooler = __lowerCAmelCase
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    """PoolFormer with a linear image-classification head.

    NOTE(review): submodules are bound to throwaway locals (``A__``) while
    the forward pass reads ``self.poolformer``, ``self.norm``,
    ``self.classifier`` and ``self.num_labels`` — presumably mangled
    ``self.`` assignments; verify.
    """

    def __init__( self : Optional[Any] , UpperCamelCase__ : str ):
        super().__init__(UpperCamelCase__ )
        A__ : List[str] =config.num_labels
        A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ )
        # Final norm
        A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        A__ : Dict =(
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        # Run the backbone, global-average-pool the normed features, classify,
        # then compute the loss matching the (possibly inferred) problem type.
        A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict
        A__ : List[str] =self.poolformer(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : str =outputs[0]
        A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) )
        A__ : Optional[Any] =None
        if labels is not None:
            # Infer the problem type from label dtype/count if not configured.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    A__ : int ="regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    A__ : Tuple ="single_label_classification"
                else:
                    A__ : Optional[int] ="multi_label_classification"
            if self.config.problem_type == "regression":
                A__ : Dict =MSELoss()
                if self.num_labels == 1:
                    A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
            elif self.config.problem_type == "single_label_classification":
                A__ : Tuple =CrossEntropyLoss()
                A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                A__ : List[Any] =BCEWithLogitsLoss()
                A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
        if not return_dict:
            A__ : Optional[int] =(logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
| 656 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# NOTE(review): bound to the obfuscated name ``__A`` but consumed below as
# ``_TO_SKIP`` — presumably a mangled binding; verify.
__A : Any = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase):
    """Pipeline-framework tests for the zero-shot-classification pipeline.

    NOTE(review): this block is machine-renamed. Every class attribute is
    declared as ``__magic_name__`` (later declarations shadow earlier ones),
    method signatures repeat ``UpperCamelCase__`` (duplicate argument names
    are a SyntaxError), and many statements read names that are never bound
    (``model_mapping``, ``tf_model_mapping``, ``classifier``, ``outputs``,
    ``original_labelaid``, ``zero_shot_classifier``). Compare against the
    upstream test_pipelines_zero_shot.py before trusting behavior here.
    """
    __magic_name__ : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    __magic_name__ : List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # Drop model types whose pipelines need non-text inputs (see _TO_SKIP above).
    if model_mapping is not None:
        __magic_name__ : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        __magic_name__ : Union[str, Any] = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    # Build the pipeline under test plus a couple of example inputs.
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
        A__ : Dict =ZeroShotClassificationPipeline(
            model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    # Exercise the output schema for single/multiple labels and for invalid
    # inputs (empty text, empty labels, bad hypothesis template).
    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
        A__ : List[str] =classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(UpperCamelCase__ , {"sequence": ANY(UpperCamelCase__ ), "labels": [ANY(UpperCamelCase__ )], "scores": [ANY(UpperCamelCase__ )]} )
        # No kwarg
        A__ : List[str] =classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(UpperCamelCase__ , {"sequence": ANY(UpperCamelCase__ ), "labels": [ANY(UpperCamelCase__ )], "scores": [ANY(UpperCamelCase__ )]} )
        A__ : Any =classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(UpperCamelCase__ , {"sequence": ANY(UpperCamelCase__ ), "labels": [ANY(UpperCamelCase__ )], "scores": [ANY(UpperCamelCase__ )]} )
        A__ : Optional[Any] =classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            UpperCamelCase__ , {"sequence": ANY(UpperCamelCase__ ), "labels": [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )], "scores": [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )]} )
        # Scores over the candidate labels should form a distribution (sum to 1).
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        A__ : Any =classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            UpperCamelCase__ , {"sequence": ANY(UpperCamelCase__ ), "labels": [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )], "scores": [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        A__ : List[Any] =classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(UpperCamelCase__ , {"sequence": ANY(UpperCamelCase__ ), "labels": [ANY(UpperCamelCase__ )], "scores": [ANY(UpperCamelCase__ )]} )
        # https://github.com/huggingface/transformers/issues/13846
        A__ : Dict =classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            UpperCamelCase__ , [
                {"sequence": ANY(UpperCamelCase__ ), "labels": [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )], "scores": [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )]}
                for i in range(1 )
            ] , )
        A__ : Optional[Any] =classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            UpperCamelCase__ , [
                {"sequence": ANY(UpperCamelCase__ ), "labels": [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )], "scores": [ANY(UpperCamelCase__ ), ANY(UpperCamelCase__ )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(UpperCamelCase__ ):
            classifier("" , candidate_labels="politics" )
        with self.assertRaises(UpperCamelCase__ ):
            classifier(UpperCamelCase__ , candidate_labels="politics" )
        with self.assertRaises(UpperCamelCase__ ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )
        with self.assertRaises(UpperCamelCase__ ):
            classifier("Who are you voting for in 2020?" , candidate_labels=UpperCamelCase__ )
        with self.assertRaises(UpperCamelCase__ ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
        with self.assertRaises(UpperCamelCase__ ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=UpperCamelCase__ , )
        self.run_entailment_id(UpperCamelCase__ )
    # Check that entailment_id is derived correctly from various label2id maps.
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Pipeline ):
        A__ : int =zero_shot_classifier.model.config
        A__ : int =config.labelaid
        A__ : List[Any] =zero_shot_classifier.entailment_id
        # Default LABEL_0/1/2 names carry no entailment info -> sentinel -1.
        A__ : Dict ={"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        A__ : Optional[int] ={"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A__ : int ={"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A__ : Any ={"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        A__ : str =original_labelaid
        self.assertEqual(UpperCamelCase__ , zero_shot_classifier.entailment_id )
    # Regression test: very long inputs must be truncated, not crash.
    @require_torch
    def _UpperCAmelCase ( self : List[str] ):
        A__ : List[str] =pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
    # Tiny PyTorch model: untrained weights give a near-uniform distribution.
    @require_torch
    def _UpperCAmelCase ( self : List[Any] ):
        A__ : Dict =pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        A__ : Union[str, Any] =zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )
    # Same check against the TensorFlow backend.
    @require_tf
    def _UpperCAmelCase ( self : Tuple ):
        A__ : Any =pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        A__ : Tuple =zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )
    # Full-size roberta-large-mnli model, PyTorch: pinned reference scores,
    # including the multi_label path on a long document.
    @slow
    @require_torch
    def _UpperCAmelCase ( self : Union[str, Any] ):
        A__ : str =pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        A__ : str =zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            } , )
        A__ : Dict =zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=UpperCamelCase__ , )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            } , )
    # Identical reference check against the TensorFlow port of the model.
    @slow
    @require_tf
    def _UpperCAmelCase ( self : Tuple ):
        A__ : Tuple =pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        A__ : Tuple =zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            } , )
        A__ : Optional[Any] =zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=UpperCamelCase__ , )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            } , )
# --- snippet boundary (dataset separator removed) ---
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    """Fast tests for IFInpaintingSuperResolutionPipeline.

    NOTE(review): machine-renamed — the two ``_UpperCamelCase`` bases
    presumably stand for PipelineTesterMixin and IFPipelineTesterMixin (both
    imported above; confirm), and every class attribute is declared as
    ``__magic_name__`` so only the last declaration survives. The upstream
    names appear to be pipeline_class / params / batch_params /
    required_optional_params.
    """
    __magic_name__ : int = IFInpaintingSuperResolutionPipeline
    # width/height are fixed by the super-resolution pipeline, so drop them.
    __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def _UpperCAmelCase ( self : Union[str, Any] ):
        return self._get_superresolution_dummy_components()
    # Build deterministic dummy inputs (image, original_image, mask) for 2 steps.
    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ):
        if str(UpperCamelCase__ ).startswith("mps" ):
            # torch.Generator(device="mps") is unsupported -> seed the global RNG.
            A__ : Any =torch.manual_seed(UpperCamelCase__ )
        else:
            A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : List[str] ={
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _UpperCAmelCase ( self : Dict ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def _UpperCAmelCase ( self : int ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def _UpperCAmelCase ( self : Tuple ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )
    def _UpperCAmelCase ( self : str ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def _UpperCAmelCase ( self : Dict ):
        self._test_save_load_local()
    def _UpperCAmelCase ( self : Optional[int] ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
# --- snippet boundary (dataset separator removed) ---
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowerCAmelCase ( nn.Module):
    """Flax cross-attention down-block: per layer one resnet followed by one 2D
    transformer block, with an optional 2x downsampler at the end.

    NOTE(review): machine-renamed — every dataclass field is ``__magic_name__``
    (so ``self.in_channels`` / ``self.num_layers`` / ... never exist), setup
    locals are bound to ``A__`` while ``resnets``/``attentions`` are read
    unbound, the setup method is not named ``setup`` (flax would never call
    it), and ``jnp.floataa`` is not a real dtype (presumably ``jnp.float32``).
    Intended field names, inferred from the attributes the methods read:
    in_channels, out_channels, dropout, num_layers, num_attention_heads,
    add_downsample, use_linear_projection, only_cross_attention,
    use_memory_efficient_attention, dtype.
    """
    __magic_name__ : int
    __magic_name__ : int
    __magic_name__ : float = 0.0
    __magic_name__ : int = 1
    __magic_name__ : int = 1
    __magic_name__ : bool = True
    __magic_name__ : bool = False
    __magic_name__ : bool = False
    __magic_name__ : bool = False
    __magic_name__ : jnp.dtype = jnp.floataa
    def _UpperCAmelCase ( self : Optional[int] ):
        A__ : List[str] =[]
        A__ : Any =[]
        for i in range(self.num_layers ):
            # First layer consumes in_channels; later layers chain on out_channels.
            A__ : List[str] =self.in_channels if i == 0 else self.out_channels
            A__ : str =FlaxResnetBlockaD(
                in_channels=UpperCamelCase__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(UpperCamelCase__ )
            A__ : Optional[int] =FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(UpperCamelCase__ )
        A__ : Optional[int] =resnets
        A__ : List[Any] =attentions
        if self.add_downsample:
            A__ : Any =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=True ):
        # Collect every intermediate state — these feed the UNet skip connections.
        A__ : str =()
        for resnet, attn in zip(self.resnets , self.attentions ):
            A__ : Dict =resnet(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
            A__ : Tuple =attn(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
            output_states += (hidden_states,)
        if self.add_downsample:
            A__ : Tuple =self.downsamplers_a(UpperCamelCase__ )
            output_states += (hidden_states,)
        return hidden_states, output_states
class __lowerCAmelCase ( nn.Module):
    """Flax 2D UNet down-block: ``num_layers`` ResNet blocks plus an optional
    2x downsampler, returning the final state and every intermediate state
    (used as skip connections by the up blocks).

    Fixes (review): the machine-renamed original declared every field as
    ``__magic_name__`` (so ``self.in_channels`` etc. never existed), bound
    setup locals to ``A__`` while reading the never-defined ``resnets``, named
    the setup method something flax never calls, and used the nonexistent
    dtype ``jnp.floataa``. Names below are restored from the attributes the
    methods actually read.
    """

    in_channels: int          # channels of the incoming feature map
    out_channels: int         # channels produced by every resnet
    dropout: float = 0.0      # dropout probability inside each resnet
    num_layers: int = 1       # number of resnet blocks
    add_downsample: bool = True   # append a downsampler after the resnets
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            # Only the first resnet sees in_channels; later ones chain on out_channels.
            block_in = self.in_channels if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=block_in,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        """Apply the resnets (and optional downsample); return (final, all intermediates)."""
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class __lowerCAmelCase ( nn.Module):
    """Flax cross-attention up-block: per layer, concatenate the matching skip
    connection, then apply one resnet and one 2D transformer; optional 2x
    upsampler at the end.

    NOTE(review): machine-renamed — duplicate ``__magic_name__`` fields destroy
    the real field names (in_channels, out_channels, prev_output_channel,
    dropout, num_layers, num_attention_heads, add_upsample,
    use_linear_projection, only_cross_attention,
    use_memory_efficient_attention, dtype — inferred from usage), setup locals
    are bound to ``A__`` while ``resnets``/``attentions``/``res_skip_channels``
    /``resnet_in_channels`` are read unbound, the setup method is misnamed,
    and ``jnp.floataa`` is not a real dtype.
    """
    __magic_name__ : int
    __magic_name__ : int
    __magic_name__ : int
    __magic_name__ : float = 0.0
    __magic_name__ : int = 1
    __magic_name__ : int = 1
    __magic_name__ : bool = True
    __magic_name__ : bool = False
    __magic_name__ : bool = False
    __magic_name__ : bool = False
    __magic_name__ : jnp.dtype = jnp.floataa
    def _UpperCAmelCase ( self : Optional[int] ):
        A__ : List[Any] =[]
        A__ : Optional[int] =[]
        for i in range(self.num_layers ):
            # Skip-connection width: last layer reconnects to in_channels.
            A__ : Any =self.in_channels if (i == self.num_layers - 1) else self.out_channels
            # First layer consumes the previous up-block's output channels.
            A__ : Any =self.prev_output_channel if i == 0 else self.out_channels
            A__ : str =FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(UpperCamelCase__ )
            A__ : Dict =FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(UpperCamelCase__ )
        A__ : Dict =resnets
        A__ : Dict =attentions
        if self.add_upsample:
            A__ : List[str] =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any]=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            A__ : Optional[int] =res_hidden_states_tuple[-1]
            A__ : Dict =res_hidden_states_tuple[:-1]
            # Concatenate the skip connection on the channel axis (NHWC layout).
            A__ : Optional[int] =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            A__ : List[str] =resnet(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
            A__ : str =attn(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
        if self.add_upsample:
            A__ : List[str] =self.upsamplers_a(UpperCamelCase__ )
        return hidden_states
class __lowerCAmelCase ( nn.Module):
    """Flax 2D UNet up-block: per layer, pop the matching skip connection,
    concatenate it on the channel axis, and run a resnet; optional 2x
    upsampler at the end.

    Fixes (review): the machine-renamed original declared every field as
    ``__magic_name__`` (destroying the real field names), bound setup locals
    to ``A__`` while reading the never-defined ``resnets`` /
    ``res_skip_channels`` / ``resnet_in_channels``, misnamed the setup method
    (flax would never call it), and used the nonexistent dtype
    ``jnp.floataa``. Names below are restored from the attributes the methods
    actually read.
    """

    in_channels: int              # channels at the matching down-block level
    out_channels: int             # channels produced by every resnet
    prev_output_channel: int      # channels coming in from the block below
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            # Skip-connection width: the last layer reconnects to in_channels.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            # First layer consumes the previous block's output channels.
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # Pop the matching skip connection and concatenate on the channel
            # axis (NHWC layout -> axis=-1).
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class __lowerCAmelCase ( nn.Module):
    """Flax UNet mid-block: one leading resnet, then ``num_layers`` pairs of
    (transformer, resnet), all at a constant channel width.

    NOTE(review): machine-renamed — duplicate ``__magic_name__`` fields destroy
    the real field names (in_channels, dropout, num_layers,
    num_attention_heads, use_linear_projection,
    use_memory_efficient_attention, dtype — inferred from usage), setup locals
    are bound to ``A__`` while ``attentions``/``resnets`` are read partly
    unbound, the setup method is misnamed, and ``jnp.floataa`` is not a real
    dtype.
    """
    __magic_name__ : int
    __magic_name__ : float = 0.0
    __magic_name__ : int = 1
    __magic_name__ : int = 1
    __magic_name__ : bool = False
    __magic_name__ : bool = False
    __magic_name__ : jnp.dtype = jnp.floataa
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # there is always at least one resnet
        A__ : int =[
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        A__ : str =[]
        for _ in range(self.num_layers ):
            A__ : Tuple =FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(UpperCamelCase__ )
            A__ : Tuple =FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(UpperCamelCase__ )
        A__ : Tuple =resnets
        A__ : Tuple =attentions
    def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=True ):
        # Leading resnet, then alternate attention and resnet.
        A__ : Optional[int] =self.resnets[0](UpperCamelCase__ , UpperCamelCase__ )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            A__ : Dict =attn(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
            A__ : Dict =resnet(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
        return hidden_states
# --- snippet boundary (dataset separator removed) ---
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the EfficientFormer package: map each submodule to
# the public names it exposes, registering optional-dependency submodules only
# when their backend (vision / torch / tf) is importable.
#
# Fixes (review): the machine-renamed original bound the structure dict and
# every optional extension to throwaway ``__A`` variables (each overwriting the
# last, so no submodule was ever registered), then referenced the never-defined
# ``_import_structure`` and assigned the ``_LazyModule`` to a variable instead
# of installing it as this module — all restored to the standard transformers
# lazy-init pattern.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the _LazyModule below
    # resolves these names on first access.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- snippet boundary (dataset separator removed) ---
"""simple docstring"""
__A : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : Optional[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Any = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Tuple = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : List[str] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : List[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
# --- snippet boundary (dataset separator removed) ---
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase(scheduler, num_steps=10):
    """Step *scheduler* ``num_steps`` times and record the learning rate seen
    before each step.

    Fixes (review): the machine-renamed original declared the parameter
    ``UpperCamelCase`` twice (duplicate argument names are a SyntaxError) and
    its body read the never-bound names ``lrs`` and ``scheduler``. Restored
    from the upstream ``unwrap_schedule`` helper. NOTE(review): the next
    function in this file is also named ``lowercase`` and shadows this one —
    both should regain their distinct upstream names.

    Args:
        scheduler: a torch ``_LRScheduler`` whose ``get_lr()``/``step()`` are used.
        num_steps: how many scheduler steps to unwrap.
    Returns:
        list[float]: the first parameter group's lr at each of the ``num_steps`` points.
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def lowercase(scheduler, num_steps=10):
    """Like the unwrap helper above, but additionally round-trips the scheduler
    state through ``state_dict`` save/load at the midpoint, so callers can
    check that serialization does not perturb the schedule.

    Fixes (review): the machine-renamed original declared ``UpperCamelCase``
    twice (duplicate argument names are a SyntaxError) and read the unbound
    names ``lrs``, ``num_steps``, ``scheduler`` while also using
    ``UpperCamelCase`` as both the tmpdir path and the state dict. Restored
    from the upstream ``unwrap_and_save_reload_schedule`` helper.

    Args:
        scheduler: a torch ``_LRScheduler`` with picklable ``state_dict()``.
        num_steps: how many scheduler steps to unwrap.
    Returns:
        list[float]: the first parameter group's lr at each step.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            # Save, reload and restore mid-run; the remaining lrs must continue
            # the schedule as if nothing happened.
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Sanity checks that AdamW and Adafactor actually minimize a tiny 3-element
    MSE problem (drive ``w`` to the fixed target vector).

    NOTE(review): machine-renamed — the duplicate ``UpperCamelCase__``
    parameters in the first method are a SyntaxError, and the training loops
    read names the mangled assignments no longer bind (``w``, ``target``,
    ``criterion``, ``optimizer``, ``loss``). Compare against the upstream
    test_optimization.py.
    """
    # Element-wise assertAlmostEqual over two equal-length lists.
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
        self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
        for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ )
    # AdamW should converge to the target within 100 steps at lr=0.2.
    def _UpperCAmelCase ( self : Tuple ):
        A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
        A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] )
        A__ : Any =nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            A__ : Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    # Same convergence check for Adafactor with a fixed (non-relative) lr.
    def _UpperCAmelCase ( self : Dict ):
        A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
        A__ : Dict =torch.tensor([0.4, 0.2, -0.5] )
        A__ : Optional[int] =nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__ : int =Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , )
        for _ in range(1000 ):
            A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''simple docstring'''
    # Scheduler-shape tests: each factory's LR curve is compared to a
    # hard-coded expectation, then save/reload round-tripping is checked.
    # NOTE(review): all three class attributes were renamed to `__magic_name__`,
    # so only the last (10, presumably `num_steps`) survives, and
    # `m.parameters()` is unbound. Originals were presumably `m`, `optimizer`,
    # `num_steps`.
    __magic_name__ : Optional[int] = nn.Linear(50 , 50) if is_torch_available() else None
    __magic_name__ : Any = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None
    __magic_name__ : Union[str, Any] = 10

    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ):
        # Element-wise "almost equal" assertion with tolerance and optional
        # failure message.
        self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
        for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[Any] ):
        A__ : Union[str, Any] ={"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        A__ : Union[str, Any] ={
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        # NOTE(review): `common_kwargs`, `scheds`, `scheduler`, `unwrap_schedule`
        # and `unwrap_and_save_reload_schedule` are unbound in this copy — the
        # renaming pass detached them from the `A__` assignments above and from
        # the module-level `lowercase` helpers.
        for scheduler_func, data in scheds.items():
            A__ , A__ : Any =data
            A__ : Union[str, Any] =scheduler_func(self.optimizer , **UpperCamelCase__ )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            A__ : int =unwrap_schedule(UpperCamelCase__ , self.num_steps )
            self.assertListAlmostEqual(
                UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
            A__ : List[str] =scheduler_func(self.optimizer , **UpperCamelCase__ )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ )  # wrap to test picklability of the schedule
            A__ : Tuple =unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps )
            self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' )
class __lowerCAmelCase :
    '''simple docstring'''
    # Picklable wrapper for a scheduler's lr_lambda callables.
    # NOTE(review): `__init__` stores its argument in a throwaway local (`A__`)
    # instead of `self.fn` (so `__call__` would raise AttributeError), and
    # `fn` / `scheduler` are unbound — artifacts of the renaming pass. The
    # classmethod body is also truncated in this copy.

    def __init__( self : int , UpperCamelCase__ : str ):
        A__ : int =fn

    def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ):
        # Delegate to the wrapped callable.
        return self.fn(*UpperCamelCase__ , **UpperCamelCase__ )

    @classmethod
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict ):
        # Wrap every lr_lambda of the given scheduler so the schedule pickles.
        A__ : str =list(map(self , scheduler.lr_lambdas ) )
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( nums : list[float] ) -> bool:
    """Return True if side lengths *nums* can close into a polygon.

    A polygon closes iff its longest side is strictly shorter than the sum of
    all the other sides. Fixes the garbled original, whose parameter was named
    ``UpperCamelCase`` while the body referenced the unbound name ``nums``.

    Args:
        nums: the candidate side lengths (at least 3 values expected).

    Raises:
        ValueError: if fewer than 3 values are given, or any value is <= 0.
    """
    if len(nums ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in nums ):
        raise ValueError("All values must be greater than 0" )
    # Sort a copy so the input list is not mutated; the largest side is last.
    copy_nums = sorted(nums )
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 656 | """simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
# Emit INFO-level progress messages during checkpoint conversion.
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger("transformers.models.speecht5")

# fairseq -> HF parameter-name mappings, grouped per sub-module. A "*" in a key
# is a wildcard for a layer index and is substituted during weight loading.
# NOTE(review): every constant below was renamed to `__A`, so at runtime only
# the last assignment survives and the merged-mapping names referenced further
# down (MAPPING_*, IGNORE_KEYS*) are unbound — artifacts of the renaming pass.
__A : Optional[Any] = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A : Optional[int] = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A : List[str] = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A : List[Any] = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A : Union[str, Any] = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A : Any = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
# Shared encoder / decoder transformer layers.
__A : Union[str, Any] = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A : Optional[int] = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task merged mappings: speech-to-text, text-to-speech, speech-to-speech.
__A : Union[str, Any] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
__A : Optional[Any] = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Optional[int] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
# Checkpoint entries to skip entirely; extended per task below ("*" suffix
# means "any name starting with this prefix").
__A : int = []
__A : int = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
__A : Optional[Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
__A : Tuple = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
__A : Union[str, Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def lowercase ( hf_pointer , key , value , full_name , weight_type ):
    """Copy *value* into the HF module attribute addressed by the dotted *key*.

    Restores the upstream ``set_recursively`` behaviour: the original declared
    five parameters all named ``UpperCamelCase`` (a duplicate-argument
    SyntaxError) and assigned the tensor to throwaway locals instead of the
    module's ``.data`` fields.

    Args:
        hf_pointer: root HF module to walk.
        key: dotted attribute path inside ``hf_pointer``.
        value: tensor to copy in.
        full_name: original fairseq parameter name (for error/log messages).
        weight_type: one of "weight", "weight_g", "weight_v", "bias",
            "running_mean", "running_var", "num_batches_tracked", or None to
            assign the pointer's own ``.data``.

    Raises:
        ValueError: if the destination shape does not match ``value.shape``.
    """
    # Walk down to the target sub-module/parameter.
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )

    # Dispatch on the destination slot; `.data =` keeps the Parameter object.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowercase ( name , ignore_keys ):
    """Return True if checkpoint entry *name* matches any pattern in *ignore_keys*.

    Patterns: a trailing ".*" matches any name starting with the prefix; an
    embedded ".*." matches when both surrounding fragments occur in the name;
    otherwise plain substring match. Fixes the garbled signature (two
    parameters both named ``UpperCamelCase``) using the names the body already
    referenced.
    """
    for key in ignore_keys:
        if key.endswith(".*" ):
            # Prefix wildcard, e.g. "text_decoder_prenet.*".
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            # Layer-index wildcard, e.g. "encoder.layers.*.norm_k.weight".
            prefix, suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ):
    """simple docstring"""
    # Copy every tensor of a fairseq state dict into the HF model using the
    # per-task name mapping, collecting anything unmatched.
    # NOTE(review): the three parameters were all renamed to `UpperCamelCase`
    # (a duplicate-argument SyntaxError) and the body still refers to the
    # detached names `fairseq_dict`, `hf_model`, `task`, `MAPPING`, `is_used`,
    # `unused_weights`, `prefix`, `suffix` — restore before use.
    A__ : Tuple =[]
    if task == "s2t":
        A__ : Dict =hf_model.speechta.encoder.prenet.feature_encoder
        A__ : int =MAPPING_S2T
        A__ : List[Any] =IGNORE_KEYS_S2T
    elif task == "t2s":
        A__ : Union[str, Any] =None
        A__ : List[Any] =MAPPING_T2S
        A__ : Tuple =IGNORE_KEYS_T2S
    elif task == "s2s":
        A__ : Optional[Any] =hf_model.speechta.encoder.prenet.feature_encoder
        A__ : Tuple =MAPPING_S2S
        A__ : Any =IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )

    for name, value in fairseq_dict.items():
        # Skip entries on the task's ignore list.
        if should_ignore(UpperCamelCase , UpperCamelCase ):
            logger.info(F'''{name} was ignored''' )
            continue
        A__ : Optional[Any] =False
        if "conv_layers" in name:
            # Feature-encoder conv layers have their own loader.
            load_conv_layer(
                UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
            A__ : List[Any] =True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    A__ , A__ : Dict =key.split(".*." )
                    if prefix in name and suffix in name:
                        A__ : int =suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    A__ : List[Any] =True
                    if "*" in mapped_key:
                        # Substitute the layer index captured from `name`.
                        A__ : Optional[int] =name.split(UpperCamelCase )[0].split("." )[-2]
                        A__ : int =mapped_key.replace("*" , UpperCamelCase )
                    # Classify which slot of the target module receives the tensor.
                    if "weight_g" in name:
                        A__ : str ="weight_g"
                    elif "weight_v" in name:
                        A__ : Optional[Any] ="weight_v"
                    elif "bias" in name:
                        A__ : Any ="bias"
                    elif "weight" in name:
                        A__ : Optional[int] ="weight"
                    elif "running_mean" in name:
                        A__ : Tuple ="running_mean"
                    elif "running_var" in name:
                        A__ : Optional[int] ="running_var"
                    elif "num_batches_tracked" in name:
                        A__ : str ="num_batches_tracked"
                    else:
                        A__ : List[Any] =None
                    set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
                continue
        if not is_used:
            unused_weights.append(UpperCamelCase )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict ):
    """simple docstring"""
    # Load one feature-extractor conv layer (conv weight/bias or its layer
    # norm) from a fairseq "conv_layers.<layer>.<type>" entry, validating shapes.
    # NOTE(review): duplicate `UpperCamelCase` parameters (SyntaxError); the
    # body refers to the detached names `full_name`, `name`, `value`,
    # `feature_extractor`, `use_group_norm`, `unused_weights`, `items`,
    # `layer_id`, `type_id` — restore before use.
    A__ : Any =full_name.split("conv_layers." )[-1]
    A__ : Dict =name.split("." )
    A__ : int =int(items[0] )
    A__ : str =int(items[1] )

    # type_id 0 -> the conv itself; type_id 2 -> the (group/layer) norm.
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            A__ : Optional[Any] =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            A__ : Optional[int] =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            A__ : Any =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            A__ : Any =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(UpperCamelCase )
@torch.no_grad()
def lowercase ( UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , ):
    """simple docstring"""
    # End-to-end conversion driver: build the task-specific SpeechT5 model,
    # optionally construct tokenizer+processor, load the fairseq weights, save
    # the result, and optionally push to the Hub.
    # NOTE(review): duplicate `UpperCamelCase` parameters (SyntaxError); the
    # body refers to the detached names `config_path`, `task`, `config`,
    # `vocab_path`, `tokenizer`, `mask_token`, `processor`, `model`,
    # `fairseq_checkpoint`, `repo_id` — restore before use.
    if config_path is not None:
        A__ : Any =SpeechTaConfig.from_pretrained(UpperCamelCase )
    else:
        A__ : Any =SpeechTaConfig()

    if task == "s2t":
        A__ : Union[str, Any] =config.max_text_positions
        A__ : Dict =SpeechTaForSpeechToText(UpperCamelCase )
    elif task == "t2s":
        A__ : str =1876
        A__ : Optional[int] =600
        A__ : Tuple =config.max_speech_positions
        A__ : Optional[Any] =SpeechTaForTextToSpeech(UpperCamelCase )
    elif task == "s2s":
        A__ : str =1876
        A__ : Tuple =config.max_speech_positions
        A__ : Any =SpeechTaForSpeechToSpeech(UpperCamelCase )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )

    if vocab_path:
        A__ : str =SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        A__ : Optional[Any] =AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase )
        A__ : int =mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    A__ : Dict =SpeechTaFeatureExtractor()
    A__ : Tuple =SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase )
    processor.save_pretrained(UpperCamelCase )
    A__ : Union[str, Any] =torch.load(UpperCamelCase )
    recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase , UpperCamelCase )
    model.save_pretrained(UpperCamelCase )

    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(UpperCamelCase )
        model.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    # NOTE(review): `parser` and `args` were both renamed to `__A`, so the
    # `parser.add_argument(...)` / `args.*` references below are unbound in
    # this copy — artifacts of the renaming pass.
    __A : Dict = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    __A : str = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 656 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Optional[Any] = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase ( _UpperCamelCase):
    '''simple docstring'''
    # GPT-Neo model configuration: stores architecture hyper-parameters and
    # expands the compact `attention_types` spec into a per-layer list.
    # NOTE(review): `__init__`'s parameters were all renamed to
    # `UpperCamelCase__` (a duplicate-argument SyntaxError); the body still
    # uses the real names (vocab_size, max_position_embeddings, ...), which
    # reveal the intended signature — restore before use.
    __magic_name__ : Union[str, Any] = """gpt_neo"""
    __magic_name__ : Union[str, Any] = ["""past_key_values"""]
    __magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__( self : Dict , UpperCamelCase__ : List[Any]=50257 , UpperCamelCase__ : Optional[Any]=2048 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : int=24 , UpperCamelCase__ : Dict=[[["global", "local"], 12]] , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=50256 , UpperCamelCase__ : List[str]=50256 , **UpperCamelCase__ : str , ):
        A__ : Optional[Any] =vocab_size
        A__ : Dict =max_position_embeddings
        A__ : List[str] =hidden_size
        A__ : List[Any] =num_layers
        A__ : Tuple =num_heads
        A__ : List[str] =intermediate_size
        A__ : Tuple =window_size
        A__ : Dict =activation_function
        A__ : str =resid_dropout
        A__ : Union[str, Any] =embed_dropout
        A__ : List[str] =attention_dropout
        A__ : Tuple =classifier_dropout
        A__ : int =layer_norm_epsilon
        A__ : int =initializer_range
        A__ : str =use_cache
        A__ : Tuple =bos_token_id
        A__ : int =eos_token_id
        A__ : int =attention_types
        # Expand the compact attention spec and validate it covers every layer.
        A__ : Any =self.expand_attention_types_params(UpperCamelCase__ )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument." )
        super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )

    @staticmethod
    def _UpperCAmelCase ( UpperCamelCase__ : List[str] ):
        # Expand [["global", "local"], 12]-style specs into a flat per-layer list.
        A__ : Optional[Any] =[]
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def lowercase ( tensor , dimension , size , step ):
    """ONNX-exportable replacement for ``torch.Tensor.unfold``.

    Extracts sliding windows of length *size* with stride *step* along
    *dimension*, appending the window axis as the last dimension — matching
    ``tensor.unfold(dimension, size, step)``. Restores the garbled original,
    which declared four parameters all named ``UpperCamelCase`` (a
    duplicate-argument SyntaxError), built ``slice(UpperCamelCase)`` instead of
    ``slice(None)``, and dropped the ``s[dimension] = indices`` assignment.
    """
    import torch

    shape = tensor.size()
    rank = len(shape )
    sizedim = shape[dimension]

    # Start index of every window, truncated to the number of full windows.
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="floor" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]

    # Fancy-index only along `dimension`; all other axes are taken whole.
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = tensor[s]

    # Move the window-size axis to the end, like Tensor.unfold does.
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def lowercase ( seq_length , window_size ):
    """Return the largest divisor of *seq_length* below *window_size* (the block
    length) and the corresponding number of blocks ``seq_length // block_length``.

    Restores the garbled original, whose two parameters shared the name
    ``UpperCamelCase`` (a duplicate-argument SyntaxError) while the body used
    the unbound names ``candidates``, ``remainders`` and ``divisor_indices``.
    Both return values are torch tensors, as in the upstream helper.
    """
    import torch

    # Candidate block lengths are 1 .. window_size-1.
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="floor" )
class __lowerCAmelCase ( _UpperCamelCase):
    '''simple docstring'''
    # ONNX export configuration for GPT-Neo (an OnnxConfigWithPast subclass):
    # declares dynamic input axes and builds dummy inputs including past
    # key/value tensors.

    @property
    def _UpperCAmelCase ( self : List[Any] ):
        # Dynamic-axis spec for model inputs; past key/values extend the
        # attention-mask sequence axis when `use_past` is enabled.
        A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
            A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"}
        else:
            A__ : Tuple ={0: "batch", 1: "sequence"}
        return common_inputs
        # NOTE(review): `UpperCamelCase__` and `common_inputs` are unbound here —
        # the renaming pass detached them from the `A__` assignments.

    @property
    def _UpperCAmelCase ( self : List[str] ):
        # Number of attention heads, taken from the wrapped model config.
        return self._config.num_heads

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
        # Build dummy inputs for ONNX export, ordered as forward() expects and
        # including zeroed past key/values plus an extended attention mask.
        # NOTE(review): duplicate `UpperCamelCase__` parameters (SyntaxError);
        # several locals (`batch`, `seqlen`, `ordered_inputs`, `mask_dtype`)
        # are likewise detached from their `A__` assignments.
        A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs(
            UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )

        # We need to order the input in the way they appears in the forward()
        A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                A__ : Union[str, Any] =seqlen + 2
                A__ : List[Any] =(
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                A__ : Optional[Any] =[
                    (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers )
                ]
        A__ : Optional[Any] =common_inputs["attention_mask"]
        if self.use_past:
            A__ : Any =ordered_inputs["attention_mask"].dtype
            A__ : Tuple =torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )

        return ordered_inputs

    @property
    def _UpperCAmelCase ( self : List[str] ):
        # Default ONNX opset version used for export.
        return 13
| 656 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
    '''simple docstring'''
    # CLIP-caption style text decoder: projects a prefix embedding (optionally
    # through a bottleneck) into a GPT-2 LM and decodes captions by beam search.
    # NOTE(review): a mechanical renaming pass gave many parameters below the
    # same name (`UpperCamelCase__`) — a duplicate-argument SyntaxError — and
    # detached numerous locals from their `A__` assignments (e.g. `hidden`,
    # `generated`, `scores`, `tokens`, `beam_size`). Comments describe the
    # apparent intent; confirm against the upstream implementation.
    __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]

    @register_to_config
    def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
        super().__init__()
        A__ : Dict =prefix_length
        # A hidden projection is mandatory when prefix and embedding dims differ.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )
        A__ : Optional[int] =prefix_inner_dim
        A__ : Optional[int] =prefix_hidden_dim
        # Encode/decode projections around the optional bottleneck; Identity
        # when no bottleneck dim is configured.
        A__ : Optional[int] =(
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : Optional[int] =(
            nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : str =GPTaConfig(
            vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ , activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , )
        A__ : Any =GPTaLMHeadModel(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
        # Forward pass: embed input tokens, project the prefix through
        # encode/decode, prepend it to the token embeddings, and run GPT-2.
        A__ : int =self.transformer.transformer.wte(UpperCamelCase__ )
        A__ : Tuple =self.encode_prefix(UpperCamelCase__ )
        A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ )
        A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 )

        if labels is not None:
            # Dummy tokens pad the label sequence to cover the prefix positions.
            A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ):
        # Zero label placeholders, one per prefix position.
        # NOTE(review): `torch.intaa` looks like a garbling of `torch.int64` — confirm.
        return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
        # Encode a raw prefix feature into the (optional) bottleneck space.
        return self.encode_prefix(UpperCamelCase__ )

    @torch.no_grad()
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
        # Generate one caption per feature row via beam search, returning the
        # stacked token ids and their sequence lengths.
        A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 )
        A__ : List[str] =[]
        A__ : Dict =[]
        for feature in features:
            A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Optional[Any] =self.generate_beam(
                input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : Optional[Any] =torch.stack(UpperCamelCase__ )
        A__ : Optional[int] =torch.stack(UpperCamelCase__ )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ):
        # Classic beam search over the GPT-2 head: keep `beam_size` hypotheses,
        # score by length-normalized log-probability, stop on the EOS token.
        A__ : str =eos_token_id
        A__ : Optional[Any] =None
        A__ : int =None
        A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int )
        A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool )

        if input_embeds is not None:
            A__ : Union[str, Any] =input_embeds
        else:
            A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ )

        for i in range(UpperCamelCase__ ):
            A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ )
            A__ : Tuple =outputs.logits
            # Temperature-scaled log-probabilities for the next token.
            A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Optional[Any] =logits.softmax(-1 ).log()

            if scores is None:
                # First step: seed the beams from the top-k next tokens.
                A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 )
                A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] )
                A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : str =next_tokens
                else:
                    A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] )
                    A__ : str =torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Subsequent steps: freeze stopped beams, re-rank all
                # (beam, token) continuations by average log-probability.
                A__ : Union[str, Any] =-float(np.inf )
                A__ : Dict =0
                A__ : Optional[Any] =scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Optional[Any] =scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 )
                A__ : Tuple =next_tokens // scores_sum.shape[1]
                A__ : List[Any] =seq_lengths[next_tokens_source]
                A__ : int =next_tokens % scores_sum.shape[1]
                A__ : str =next_tokens.unsqueeze(1 )
                A__ : List[Any] =tokens[next_tokens_source]
                A__ : int =torch.cat((tokens, next_tokens) , dim=1 )
                A__ : List[str] =generated[next_tokens_source]
                A__ : Optional[Any] =scores_sum_average * seq_lengths
                A__ : Optional[int] =is_stopped[next_tokens_source]

            # Append the chosen tokens' embeddings and update the stop mask.
            A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : str =torch.cat((generated, next_token_embed) , dim=1 )
            A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze()
            if is_stopped.all():
                break

        # Order beams by final average score, best first.
        A__ : Optional[int] =scores / seq_lengths
        A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ )
        # tokens tensors are already padded to max_seq_length
        A__ : int =[tokens[i] for i in order]
        A__ : Any =torch.stack(UpperCamelCase__ , dim=0 )
        A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 656 | 1 |
"""simple docstring"""
def lowercase ( graph : list[list[int]] , next_ver : int , curr_ind : int , path : list[int] ) -> bool:
    """Return True if *next_ver* can extend *path* at position *curr_ind*.

    A vertex is a valid extension when an edge exists from the previous path
    vertex to it and it does not already appear in the path. Fixes the garbled
    signature, whose four parameters all shared the name ``UpperCamelCase`` (a
    duplicate-argument SyntaxError) while the body used the real names.

    Args:
        graph: adjacency matrix (0 = no edge).
        next_ver: candidate vertex to place at index ``curr_ind``.
        curr_ind: position in ``path`` being filled.
        path: partial path; unfilled slots hold -1.
    """
    # 1. Validate that an edge exists between the current and next vertices.
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that the next vertex is not already in the path.
    return not any(vertex == next_ver for vertex in path )
def lowercase ( UpperCamelCase : list[list[int]] , UpperCamelCase : list[int] , UpperCamelCase : int ):
"""simple docstring"""
# Base Case
if curr_ind == len(UpperCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(UpperCamelCase ) ):
if valid_connection(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
# Insert current vertex into path as next transition
A__ : Optional[Any] =next_ver
# Validate created path
if util_hamilton_cycle(UpperCamelCase , UpperCamelCase , curr_ind + 1 ):
return True
# Backtrack
A__ : str =-1
return False
def lowercase ( UpperCamelCase : list[list[int]] , UpperCamelCase : int = 0 ):
"""simple docstring"""
A__ : int =[-1] * (len(UpperCamelCase ) + 1)
# initialize start and end of path with starting index
A__ : str =start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(UpperCamelCase , UpperCamelCase , 1 ) else []
| 656 | """simple docstring"""
import os
def lowercase(path=None):
    """Project Euler "maximum path sum" solver.

    Reads a triangle of integers (one row per line, space-separated) and
    returns the maximum top-to-bottom path sum, computed bottom-up with
    dynamic programming.

    Args:
        path: optional path to the triangle file; defaults to
            ``triangle.txt`` next to this script (backward compatible with
            the original no-argument call).
    """
    if path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(script_dir, "triangle.txt")
    with open(path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # Each cell accumulates the best of the (up to) two parents above it.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_one = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_two = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_one, number_two)
    return max(a[-1])


if __name__ == "__main__":
    print(lowercase())
| 656 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase(_UpperCamelCase):
    """Unconditional image-generation pipeline using the stochastic sampler of
    Karras et al. (2022).

    Attributes:
        unet: U-Net that predicts the noise residual.
        scheduler: KarrasVe scheduler providing the sigma schedule and the
            Euler / 2nd-order correction steps.
    """

    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs):
        """Run the sampling loop and return generated images.

        Returns an ``ImagePipelineOutput`` (or a plain tuple when
        ``return_dict`` is False).
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"], )
            sample = step_output.prev_sample
        # map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the conversion routine below logs through this name.
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Translate original GLPN checkpoint key names into HuggingFace names.

    Each ``replace`` below rewrites one naming convention; replacements are
    applied cumulatively to the same key before it is stored.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split the fused key/value ("kv") projection of every encoder block.

    The original implementation stores keys and values as one matrix; the
    HuggingFace model expects separate ``key`` and ``value`` weights/biases.
    Mutates ``state_dict`` in place.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download and return the standard COCO cats test image used to verify
    the converted model's forward pass."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def lowercase(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint into the HuggingFace format.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output folder (used for the hub repo path).
        push_to_hub: whether to upload model + image processor to the hub.
        model_name: optional name; when given, the forward pass is verified
            against known output slices for the "nyu"/"kitti" checkpoints.
    """
    # architecture hyper-parameters are fixed for the released GLPN checkpoints
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    # CLI wrapper around the conversion entry point defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    # the conversion entry point above is named `lowercase`
    lowercase(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 656 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps submodule name -> public names it exports.
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )
else:
    import sys

    # At runtime the module is replaced with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): both statements below bind the same name `__A`, so the logger
# is immediately clobbered by the dict — upstream these were presumably
# `logger` and `GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP`; verify before relying
# on either binding.
__A : Any = logging.get_logger(__name__)
# Mapping of pretrained GPT-Neo checkpoints to their hosted config URLs.
__A : Optional[Any] = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase(_UpperCamelCase):
    """Configuration class for GPT-Neo models.

    Stores the model hyper-parameters, including the per-layer attention
    pattern described by ``attention_types`` (e.g. alternating
    "global"/"local" attention).
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Lets generic code read `num_attention_heads`/`num_hidden_layers` even
    # though this config stores them as `num_heads`/`num_layers`.
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        # One attention type per layer is required.
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument." )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand ``[[["global", "local"], 12]]`` into a flat per-layer list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def lowercase(input, dimension: int, size: int, step: int):
    """Custom re-implementation of ``torch.Tensor.unfold`` built from basic
    indexing ops (usable where the native op cannot be exported, e.g. ONNX).

    Returns sliding windows of length ``size`` taken every ``step`` elements
    along ``dimension``, with the window axis moved to the end, matching
    ``input.unfold(dimension, size, step)``.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    # start index of every possible window, then keep only the full ones
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    # move the new window axis to the last position
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def lowercase(seq_length, window_size):
    """Return the largest divisor of ``seq_length`` strictly smaller than
    ``window_size`` (the block length) and the resulting number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class __lowerCAmelCase(_UpperCamelCase):
    """ONNX export configuration for GPT-Neo (with past-key-values support)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (including zero past-key-values when enabled)
        in the order the model's forward() expects them."""
        # skip OnnxConfigWithPast and call the base implementation directly
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # extend the mask to cover the past positions as well
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 656 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # keep the name bound so later references (e.g. slow_tokenizer_class) do
    # not raise NameError when sentencepiece is unavailable
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Hosted locations of the vocab/tokenizer files for the released checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = "▁"
class __lowerCAmelCase(_UpperCamelCase):
    """Fast (tokenizers-backed) BigBird tokenizer.

    Mirrors the slow SentencePiece tokenizer; special tokens follow the
    BigBird convention ([CLS]/[SEP]/[MASK]).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        # the slow vocab can only be re-saved when the spiece model is present
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the SentencePiece model into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 656 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): both statements bind the same name `__A`, so the logger is
# immediately clobbered by the (empty) dict below — upstream these were
# presumably `logger` and the pretrained-config archive map; verify before
# relying on either binding.
__A : Union[str, Any] = logging.get_logger(__name__)
# Mapping of pretrained checkpoints to config URLs (none published).
__A : Any = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase(_UpperCamelCase):
    """Configuration class for MEGATRON-BERT models.

    Holds the standard BERT-style hyper-parameters with Megatron defaults
    (e.g. 29056 vocab, 1024 hidden, 24 layers).
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def lowercase ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ):
"""simple docstring"""
A__ : Optional[int] =namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | """simple docstring"""
from __future__ import annotations
def lowercase(nums: list[float]) -> bool:
    """Return True if side lengths ``nums`` can form a polygon (the longest
    side must be strictly shorter than the sum of the others).

    Raises:
        ValueError: for fewer than 3 sides or any non-positive length.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()  # do not mutate the caller's list
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Pick a random element of ``lst`` to use as the quickselect pivot."""
    return choice(lst)


def lowercase(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of ``lst`` (1-indexed) using
    randomized quickselect (expected linear time)."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return lowercase(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return lowercase(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 656 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it exports.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # At runtime the module is replaced with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal LightningModule wrapper matching the training-time layout, used
    only to load the fine-tuned QA checkpoint for conversion."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # no-op: only the weights are needed for conversion
        pass
def lowercase ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str ):
"""simple docstring"""
# load longformer model from model identifier
A__ : Union[str, Any] =LongformerModel.from_pretrained(UpperCamelCase )
A__ : str =LightningModel(UpperCamelCase )
A__ : List[str] =torch.load(UpperCamelCase , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
A__ : List[Any] =LongformerForQuestionAnswering.from_pretrained(UpperCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCamelCase )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # the conversion entry point above is named `lowercase`
    lowercase(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 656 | """simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes in ``[2, num]`` in ascending order.

    Raises:
        ValueError: if ``num`` <= 0.
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark multiples of p starting at p*p (smaller multiples were
            # already marked by smaller primes). The mangled original stepped
            # the range by `num` instead of `p`, breaking the sieve.
            for multiple in range(p * p, num + 1, p):
                primes[multiple] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


# Backward-compatible alias for the original (mangled) name.
lowercase = prime_sieve_eratosthenes
if __name__ == "__main__":
    # Run doctests first (none are defined in this mangled copy).
    import doctest
    doctest.testmod()
    # NOTE(review): identifier mangling — the input is bound to `__A` but read
    # as `user_num`, and the sieve must be defined as
    # `prime_sieve_eratosthenes`; verify both names resolve.
    __A : Optional[int] = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 656 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: submodule name -> public symbols exported by it.
# NOTE(review): identifier mangling — upstream this dict is `_import_structure`
# (referenced by _LazyModule at the bottom), and each optional branch adds a
# key to it; here every binding was renamed `__A`, so `_import_structure` is
# undefined at module scope. Flagged rather than restructured.
__A : List[str] = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
# Fast tokenizer requires the optional `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[str] = ["WhisperTokenizerFast"]
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[str] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[str] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]
# Flax model classes.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Dict = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]
# Under static type checking import everything eagerly so type checkers see
# the real symbols; at runtime the module is replaced by a lazy proxy.
if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is never defined in this copy (see top
    # note); upstream this line assigns the lazy module into
    # sys.modules[__name__] rather than a plain variable.
    __A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase):
    """CPU-only regression test: an optimizer wrapped by ``Accelerator.prepare``
    must remain picklable."""

    def _UpperCAmelCase ( self : List[Any] ):
        """Prepare an SGD optimizer with Accelerate and assert a pickle round-trip works."""
        # Fix: the mangled original bound every result to a throwaway local and
        # then read the undefined names `model`, `accelerator`, `UpperCamelCase__`.
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # Reset the global singleton so later tests start from a clean state.
        AcceleratorState._reset_state()
| 656 | 1 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__A : Optional[Any] = 3
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
print("Generating primitive root of p" )
while True:
A__ : Optional[int] =random.randrange(3 , UpperCamelCase )
if pow(UpperCamelCase , 2 , UpperCamelCase ) == 1:
continue
if pow(UpperCamelCase , UpperCamelCase , UpperCamelCase ) == 1:
continue
return g
def generate_key(key_size: int):
    """Generate an ElGamal key pair of ``key_size`` bits.

    Returns:
        ``((key_size, e_1, e_2, p), (key_size, d))`` — public and private keys.
    """
    print("Generating prime p..." )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


# Backward-compatible alias for the original (mangled) name.
lowercase = generate_key
def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` and ``{name}_privkey.txt``.

    Exits the interpreter instead of overwriting existing key files.
    """
    if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
        print("\nWARNING:" )
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    # Fix: the mangled original discarded the key pair and then read the
    # undefined names `public_key` / `private_key`.
    public_key , private_key = generate_key(key_size )
    print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(F'''{name}_pubkey.txt''' , "w" ) as fo:
        fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(F'''Writing private key to file {name}_privkey.txt...''' )
    with open(F'''{name}_privkey.txt''' , "w" ) as fo:
        fo.write(F'''{private_key[0]},{private_key[1]}''' )


# Backward-compatible alias for the original (mangled) name.
lowercase = make_key_files
def main() -> None:
    """Generate the demo ElGamal key pair (2048-bit, files prefixed ``elgamal``)."""
    print("Making key files..." )
    make_key_files("elgamal" , 2048 )
    print("Key files generation successful" )


# Backward-compatible alias for the original (mangled) name.
lowercase = main
if __name__ == "__main__":
    # NOTE(review): this copy's defs are all mangled to `lowercase`; verify a
    # callable named `main` actually resolves here.
    main()
| 656 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (sentencepiece-based) tokenizer is optional.
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # NOTE(review): mangled — upstream this is `BigBirdTokenizer = None` so the
    # class attribute below still resolves when sentencepiece is absent.
    __A : Optional[int] = None
__A : Union[str, Any] = logging.get_logger(__name__)
# NOTE(review): the constants below are read by the tokenizer class as
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, but mangling renamed each binding
# to `__A` — verify against the unmangled original.
__A : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# model id -> remote location of each vocab/tokenizer file
__A : str = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}
# model id -> maximum sequence length supported by positional embeddings
__A : List[str] = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}
# sentencepiece's word-boundary marker character
__A : Tuple = "▁"
class __lowerCAmelCase ( _UpperCamelCase):
    """Fast (HF `tokenizers`-backed) BigBird tokenizer.

    Builds model inputs as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``;
    falls back to a sentencepiece slow tokenizer for vocabulary export.
    """
    # NOTE(review): mangled class-attribute names — upstream these are
    # vocab_files_names, pretrained_vocab_files_map, max_model_input_sizes,
    # slow_tokenizer_class, model_input_names, prefix_tokens respectively.
    __magic_name__ : Dict = VOCAB_FILES_NAMES
    __magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : List[Any] = BigBirdTokenizer
    __magic_name__ : Any = ["""input_ids""", """attention_mask"""]
    __magic_name__ : List[int] = []
    def __init__( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : str="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : str="[CLS]" , **UpperCamelCase__ : List[Any] , ):
        """Normalize each special token to an AddedToken and initialize the fast tokenizer."""
        A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
        A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
        A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
        A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
        A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
        A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        super().__init__(
            UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
        A__ : List[Any] =vocab_file
        # A slow->fast conversion back to sentencepiece needs the original vocab file.
        A__ : Optional[int] =False if not self.vocab_file else True
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """Build `[CLS] A [SEP]` (or `[CLS] A [SEP] B [SEP]`) from token-id sequences."""
        A__ : Tuple =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        """Return a 0/1 mask marking special-token positions in the sequence(s)."""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCamelCase__ )) + [1]
        return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """Return token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        A__ : Tuple =[self.sep_token_id]
        A__ : Dict =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        """Copy the sentencepiece model file into `save_directory`; requires the slow vocab."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : List[str] =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        return (out_vocab_file,)
| 656 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# NOTE(review): mangled — upstream this binds the module logger as `logger`.
__A : Optional[Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase):
    """Image processor: optional resize -> center-crop -> rescale -> normalize
    pipeline producing a ``pixel_values`` batch (ImageNet default mean/std).
    """
    __magic_name__ : Dict = ["""pixel_values"""]
    def __init__( self : Optional[int] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[str] , ):
        """Store the per-step flags and parameters; defaults: 224x224 size/crop, ImageNet stats."""
        super().__init__(**UpperCamelCase__ )
        A__ : int =size if size is not None else {"height": 224, "width": 224}
        A__ : Union[str, Any] =get_size_dict(UpperCamelCase__ )
        A__ : Dict =crop_size if crop_size is not None else {"height": 224, "width": 224}
        A__ : int =get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="crop_size" )
        A__ : str =do_resize
        A__ : Any =do_rescale
        A__ : Union[str, Any] =do_normalize
        A__ : Optional[int] =do_center_crop
        A__ : List[str] =crop_size
        A__ : Union[str, Any] =size
        A__ : int =resample
        A__ : List[Any] =rescale_factor
        A__ : str =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        A__ : int =image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
        """Resize to an explicit {height,width} or proportionally via shortest_edge."""
        A__ : Optional[int] =get_size_dict(UpperCamelCase__ )
        if "shortest_edge" in size:
            A__ : Dict =get_resize_output_image_size(UpperCamelCase__ , size=size["shortest_edge"] , default_to_square=UpperCamelCase__ )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            A__ : List[Any] =(size["height"], size["width"])
        else:
            raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
        return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
    def _UpperCAmelCase ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[Any] , ):
        """Center-crop to exactly {height, width}."""
        A__ : str =get_size_dict(UpperCamelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(UpperCamelCase__ , size=(size["height"], size["width"]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] ):
        """Multiply pixel values by a scalar (typically 1/255)."""
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ):
        """Channel-wise (x - mean) / std normalization."""
        return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
        """Run the full pipeline over one image or a batch; per-call args override the stored defaults."""
        A__ : int =do_resize if do_resize is not None else self.do_resize
        A__ : Optional[Any] =do_rescale if do_rescale is not None else self.do_rescale
        A__ : Optional[Any] =do_normalize if do_normalize is not None else self.do_normalize
        A__ : List[str] =do_center_crop if do_center_crop is not None else self.do_center_crop
        A__ : Union[str, Any] =crop_size if crop_size is not None else self.crop_size
        A__ : Optional[Any] =get_size_dict(UpperCamelCase__ , param_name="crop_size" , default_to_square=UpperCamelCase__ )
        A__ : Tuple =resample if resample is not None else self.resample
        A__ : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
        A__ : List[Any] =image_mean if image_mean is not None else self.image_mean
        A__ : Dict =image_std if image_std is not None else self.image_std
        A__ : List[str] =size if size is not None else self.size
        A__ : List[str] =get_size_dict(UpperCamelCase__ )
        if not is_batched(UpperCamelCase__ ):
            A__ : Optional[int] =[images]
        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        A__ : Tuple =[to_numpy_array(UpperCamelCase__ ) for image in images]
        if do_resize:
            A__ : List[Any] =[self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
        if do_center_crop:
            A__ : Tuple =[self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
        if do_rescale:
            A__ : Any =[self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
        if do_normalize:
            A__ : Tuple =[self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
        A__ : List[Any] =[to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
        A__ : int ={"pixel_values": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 656 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# NOTE(review): mangled bindings — upstream these are `logger`,
# `VOCAB_FILES_NAMES` and `PRETRAINED_VOCAB_FILES_MAP` respectively (the
# tokenizer class below reads those names).
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = {"vocab_file": "spiece.model"}
# model id -> remote location of the sentencepiece model file
__A : List[Any] = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase ( _UpperCamelCase):
    """CPM tokenizer: XLNet-style sentencepiece tokenization with jieba
    pre-segmentation for Chinese text. Spaces/newlines are mapped to the
    placeholders \u2582/\u2583 for encoding and restored in ``_decode``.
    """
    def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
        """Load the sentencepiece model and set up jieba plus the space/newline translation table."""
        A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        A__ : Dict =3
        A__ : int =do_lower_case
        A__ : str =remove_space
        A__ : Optional[Any] =keep_accents
        A__ : int =vocab_file
        A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCamelCase__ )
        # jieba is only needed by this tokenizer, so import lazily with a clear error.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        A__ : Union[str, Any] =jieba
        A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def _UpperCAmelCase ( self : Union[str, Any] ):
        """Size of the sentencepiece vocabulary."""
        return len(self.sp_model )
    def _UpperCAmelCase ( self : Optional[int] ):
        """Return token -> id mapping, including added tokens."""
        A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : List[str] ):
        """Drop the unpicklable SentencePieceProcessor before pickling."""
        A__ : Union[str, Any] =self.__dict__.copy()
        A__ : Tuple =None
        return state
    def __setstate__( self : Tuple , UpperCamelCase__ : int ):
        """Restore state and re-load the sentencepiece model from the vocab file."""
        A__ : Union[str, Any] =d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            A__ : Optional[int] ={}
        A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ):
        """Apply whitespace cleanup, quote normalization, optional accent stripping and lowercasing."""
        if self.remove_space:
            A__ : Optional[int] =" ".join(inputs.strip().split() )
        else:
            A__ : Optional[Any] =inputs
        A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ )
            A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
        if self.do_lower_case:
            A__ : str =outputs.lower()
        return outputs
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
        """Tokenize with sentencepiece, re-splitting number+comma pieces (XLNet behavior)."""
        A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ )
        A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
        A__ : List[str] =[]
        for piece in pieces:
            if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                A__ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        A__ : Union[str, Any] =cur_pieces[1:]
                    else:
                        A__ : List[str] =cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(UpperCamelCase__ )
            else:
                new_pieces.append(UpperCamelCase__ )
        return new_pieces
    def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
        """token -> id via sentencepiece."""
        return self.sp_model.PieceToId(UpperCamelCase__ )
    def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
        """id -> token via sentencepiece."""
        return self.sp_model.IdToPiece(UpperCamelCase__ )
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
        """Join tokens and replace the sentencepiece underline with a space."""
        A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
        return out_string
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """XLNet-style input build: `A <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        A__ : List[str] =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        """Return a 0/1 mask marking special-token positions (trailing `<sep> <cls>`)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
        if token_ids_a is not None:
            return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
        return ([0] * len(UpperCamelCase__ )) + [1, 1]
    def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """Token-type ids: segment 0/1 plus segment id 2 for the trailing `<cls>`."""
        A__ : List[str] =[self.sep_token_id]
        A__ : Optional[Any] =[2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        """Write the sentencepiece model (copy, or serialize if the source file vanished)."""
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : Tuple =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase__ , "wb" ) as fi:
                A__ : Optional[Any] =self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__ )
        return (out_vocab_file,)
    def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
        """Decode, then restore spaces/newlines from the \u2582/\u2583 placeholders."""
        A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ )
        A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
| 656 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU: x * Phi(x), with the Gaussian CDF computed via erf.

    Restored name: the mangled copy bound results to throwaway locals and the
    return line reads `x` and `cdf`; later code (gelu_10, the TF<2.4 branch)
    refers to `_gelu`.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


lowercase = _gelu  # backward-compatible alias for the mangled name
def _gelu_new(x):
    """Tanh-approximation GELU (the 'new' GPT-2 variant).

    Restored name: the body reads `pi`, `coeff`, `x`; the TF<2.4 branch below
    refers to `_gelu_new`.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.04_47_15, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


lowercase = _gelu_new  # backward-compatible alias for the mangled name
def mish(x):
    """Mish activation: x * tanh(softplus(x)).

    Restored name: the ACTaFN table below refers to `mish`.
    """
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


lowercase = mish  # backward-compatible alias for the mangled name
def gelu_fast(x):
    """Fast polynomial tanh approximation of GELU.

    Restored name (`gelu_fast` is read by the ACTaFN table). The mangled copy
    collapsed both coefficients to one name; per the upstream implementation
    0.044715 multiplies x*x and 0.7978845608 (= sqrt(2/pi)) scales the tanh
    argument — TODO confirm against the unmangled source.
    """
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.04_47_15, x.dtype)
    coeff2 = tf.cast(0.79_78_84_56_08, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


lowercase = gelu_fast  # backward-compatible alias for the mangled name
def quick_gelu(x):
    """Quick GELU: x * sigmoid(1.702 * x).

    Restored name: the body reads `coeff` and the ACTaFN table refers to
    `quick_gelu`.
    """
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.7_02, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


lowercase = quick_gelu  # backward-compatible alias for the mangled name
def gelu_aa(x):
    """GELU clipped to [-10, 10] (upstream `gelu_10`, useful for quantization).

    Named `gelu_aa` because that is the identifier the ACTaFN table below
    reads (the mangler rewrote the digits).
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


lowercase = gelu_aa  # backward-compatible alias for the mangled name
def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis`, gate the first half
    with the sigmoid of the second.

    Restored: the mangled copy had duplicate parameter names (a SyntaxError)
    and returned `a * sigmoid(<param>)` with `a` unbound.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


lowercase = glu  # backward-compatible alias for the mangled name
# From TF 2.4 the Keras-native gelu (with tanh approximation) is available;
# otherwise fall back to the manual implementations above. Restored names:
# the mangled copy assigned everything to `__A`, leaving `gelu` / `gelu_new`
# (read by the ACTaFN table below) undefined, and passed a nonsensical value
# for `approximate` (upstream passes True — TODO confirm).
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        """Keras-native GELU with the tanh approximation enabled."""
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
# Name -> activation lookup. Restored name: the lookup function below reads
# `ACTaFN`, but the mangled copy bound this dict to `__A`.
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase(activation_string):
    """Resolve an activation name to its TF callable via ACTaFN.

    Fix: the mangled parameter was named `UpperCamelCase` while the body reads
    `activation_string`, so every call raised NameError.

    Raises:
        KeyError: if `activation_string` is not a key of ACTaFN.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 656 | """simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered sequences drawn from `array` (repetition allowed) that sum
    to `target`, by plain exponential recursion.

    `n` (== len(array)) is unused here but kept for signature parity with the
    DP variants. Fix: the mangled copy declared three parameters with the same
    name (a SyntaxError) while the body reads `array` and `target`.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


lowercase = combination_sum_iv  # backward-compatible alias for the mangled name
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as `combination_sum_iv`, memoized top-down in `dp_array`
    (dp_array[t] == -1 means 'not computed yet').

    Fix: the mangled copy declared duplicate parameter names (a SyntaxError)
    and discarded the memo writes.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


lowercase = combination_sum_iv_dp_array  # backward-compatible alias
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up DP: dp_array[i] counts ordered combinations from `array`
    (of length `n`) summing to i; O(n * target) time.

    Fix: the mangled copy declared duplicate parameter names (a SyntaxError)
    while the body reads `n`, `array` and `target`.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: the empty sequence
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


lowercase = combination_sum_iv_bottom_up  # backward-compatible alias
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): identifier mangling — the three values are bound to `__A`
    # but read as `n`, `array`, `target`, and the callee must be defined as
    # `combination_sum_iv`; verify these names resolve.
    __A : Optional[Any] = 3
    __A : Optional[Any] = 5
    __A : int = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 656 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Integration checks that JukeboxTokenizer turns the fixed
    artist/genre/lyrics metadata below into known-good token tables for the
    1b-lyrics and 5b-lyrics checkpoints.

    NOTE(review): local names in both tests were mangled (every binding
    collapsed to ``A__`` while later lines still read ``tokenizer``,
    ``tokens`` and ``EXPECTED_OUTPUT``); the code is kept byte-identical here
    and needs those bindings restored before it can run.
    '''

    # Tokenizer class under test.
    __magic_name__ : Union[str, Any] = JukeboxTokenizer
    # Conditioning inputs shared by both tests (lyrics: Shelley's "Ozymandias").
    __magic_name__ : Optional[Any] = {
        """artist""": """Zac Brown Band""",
        """genres""": """Country""",
        """lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
    }

    @require_torch
    def _UpperCAmelCase ( self : Any ):
        '''Tokenize ``self.metas`` with the 1b-lyrics tokenizer and compare
        against the frozen expected token tables below.'''
        import torch

        A__ : Union[str, Any] =JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
        A__ : int =tokenizer(**self.metas )["input_ids"]
        # fmt: off
        A__ : Optional[Any] =[
            torch.tensor([[
                0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]] ),
            torch.tensor([[0, 0, 0, 1069, 11]] ),
            torch.tensor([[0, 0, 0, 1069, 11]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )

    @require_torch
    def _UpperCAmelCase ( self : List[Any] ):
        '''Same check for the 5b-lyrics tokenizer (note the -1 genre padding
        slots in its metadata row).'''
        import torch

        A__ : Tuple =JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
        A__ : Union[str, Any] =tokenizer(**self.metas )["input_ids"]
        # fmt: off
        A__ : Optional[int] =[
            torch.tensor([[
                0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]] ),
            torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 656 | """simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase(x):
    """Gaussian Error Linear Unit via the exact erf formulation.

    Reference: Hendrycks & Gimpel, https://arxiv.org/abs/1606.08415
    """
    # BUG FIX: the converted tensor was bound to a throwaway name while the
    # formula below read an undefined ``x``; bind the conversion to ``x``.
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def lowercase(x):
    """GELU tanh approximation (the "new" GPT-2 style variant).

    Reference: https://arxiv.org/abs/1606.08415
    """
    # BUG FIX: intermediate results were all bound to throwaway names while
    # the formula read undefined ``x``/``pi``/``coeff``; restore the bindings.
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def lowercase(x):
    """Mish activation: x * tanh(softplus(x)).

    Reference: https://arxiv.org/abs/1908.08681
    """
    # BUG FIX: the converted tensor was discarded and the product read an
    # undefined ``x``; apply both factors to the converted tensor.
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def lowercase(x):
    """Fast GELU approximation: 0.5*x*(1 + tanh(x*0.7978845608*(1 + 0.044715*x*x)))."""
    # BUG FIX: both coefficients were bound to the same throwaway name and the
    # formula read undefined locals; restore distinct coeff1/coeff2 bindings
    # (0.7978845608 ~= sqrt(2/pi) multiplies x, 0.044715 the cubic term).
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def lowercase(x):
    """Quick GELU: x * sigmoid(1.702 * x) -- a cheap sigmoid approximation."""
    # BUG FIX: the converted tensor and coefficient were bound to throwaway
    # names while the return read undefined ``x``/``coeff``.
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def lowercase ( UpperCamelCase : Tuple ):
    """Clipped GELU: the exact-erf GELU with outputs clamped to [-10, 10]
    (useful for quantization-friendly activation ranges).

    NOTE(review): ``_gelu`` is resolved at module level; it is not defined
    under that name in this chunk -- confirm it exists in the full file.
    """
    activated = _gelu(UpperCamelCase )
    clipped = tf.clip_by_value(activated , -10 , 10 )
    return clipped
def lowercase(x, axis=-1):
    """Gated Linear Unit: split ``x`` in two along ``axis`` and gate the first
    half with the sigmoid of the second: a * sigmoid(b).

    Reference: https://arxiv.org/abs/1612.08083
    """
    # BUG FIX: the original declared both parameters with the same name (a
    # SyntaxError), unpacked both split halves into one throwaway name, and
    # gated with the wrong operand; restore a/b and gate with sigmoid(b).
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        """Keras gelu with the tanh approximation forced on."""
        # BUG FIX: ``approximate`` was passed the input tensor itself; the
        # wrapper exists precisely to force approximate=True.
        return tf.keras.activations.gelu(x, approximate=True)

    # TF >= 2.4 ships gelu natively; use it, and the approximate wrapper for
    # gelu_new.  BUG FIX: both aliases were bound to the same name ``__A``
    # (and the wrapper def did not match the name referenced here); the
    # activation table below expects ``gelu``/``gelu_new``.
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    # NOTE(review): ``_gelu``/``_gelu_new`` are the expected module-level
    # fallbacks; they are not defined under those names in this chunk --
    # confirm against the full file.
    gelu = _gelu
    gelu_new = _gelu_new
# Name -> TF activation mapping consumed by the lookup helper below.
# BUG FIX: the dict was bound to ``__A`` while the lookup helper reads
# ``ACTaFN`` (NameError); bind it under the name actually used.
# NOTE(review): gelu_aa / gelu_fast / gelu_new / glu / mish / quick_gelu are
# expected module-level names in the full file; several activations in this
# chunk were generated under other identifiers -- confirm the bindings.
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase(activation_string):
    """Look up a TF activation function by name in ``ACTaFN``.

    Raises:
        KeyError: if ``activation_string`` is not a registered activation.
    """
    # BUG FIX: the parameter was renamed while the body kept reading the old
    # name ``activation_string``; align the parameter with its uses.
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 656 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase(suffix: str = ""):
    """Return a unique file path inside a fresh temp directory, ending in
    ``suffix`` (e.g. ".wav")."""
    # BUG FIX: ``uuid.uuida`` does not exist -- the call is ``uuid.uuid4``.
    # The parameter is also named ``suffix`` to match the keyword used by
    # callers (get_new_path(suffix=".wav")).
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Round-trip tests for AgentAudio: a tensor serializes to an audio file
    and an audio file loads back into a matching tensor.

    NOTE(review): both methods share the generated name ``_UpperCAmelCase``,
    so the second shadows the first under unittest discovery -- confirm the
    intended test names upstream.
    '''

    def _UpperCAmelCase ( self : Tuple ):
        '''Tensor -> AgentAudio -> file: the file must decode to the tensor.'''
        # BUG FIX: every local was bound to a throwaway name while later
        # lines read undefined identifiers; restore coherent locals.
        # NOTE(review): dtype reconstructed as float64 from the garbled
        # ``torch.floataa`` (sf.read also yields float64) -- confirm.
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )

        # Ensure that the file contains the same value as the original tensor
        new_tensor , _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )

    def _UpperCAmelCase ( self : Tuple ):
        '''File -> AgentAudio: to_raw() matches the samples, to_string() the path.'''
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        path = get_new_path(suffix=".wav" )
        sf.write(path , tensor , 16000 )
        agent_type = AgentAudio(path )

        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Round-trip tests for AgentImage built from a tensor, a path, and a
    PIL image.

    NOTE(review): all three methods share the generated name
    ``_UpperCAmelCase``, so only the last survives under unittest discovery --
    confirm the intended test names upstream.
    '''

    def _UpperCAmelCase ( self : int ):
        '''Tensor -> AgentImage: raw PIL output, matching tensor, persisted file.'''
        # BUG FIX: locals were garbled (bindings collapsed to throwaway names
        # while later lines read undefined identifiers); restored here.
        tensor = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )

        self.assertIsInstance(agent_type.to_raw() , Image.Image )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )

    def _UpperCAmelCase ( self : List[Any] ):
        '''Path -> AgentImage: to_string() must point at the same file.'''
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(path )

        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )

    def _UpperCAmelCase ( self : int ):
        '''PIL image -> AgentImage: serialized to a NEW file (not the source).'''
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(image )

        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class __lowerCAmelCase ( unittest.TestCase):
    '''AgentText should behave exactly like the string it wraps.'''

    def _UpperCAmelCase ( self : int ):
        # BUG FIX: the string and wrapper were bound to throwaway names while
        # the assertions read undefined identifiers; restored here.
        string = "Hey!"
        agent_type = AgentText(string )

        self.assertEqual(string , agent_type.to_string() )
        self.assertEqual(string , agent_type.to_raw() )
        # AgentText subclasses str, so it compares equal to the raw string.
        self.assertEqual(string , agent_type )
| 656 | """simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( _UpperCamelCase):
    '''Config smoke test: a freshly built Segformer config must expose the
    core size attributes.'''

    def _UpperCAmelCase ( self : Dict ):
        # BUG FIX: the built config was bound to a throwaway name while the
        # hasattr checks read an undefined identifier; restored here.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config , "num_attention_heads" ) )
        self.parent.assertTrue(hasattr(config , "num_encoder_blocks" ) )
class __lowerCAmelCase :
    '''Builds small Segformer configs plus random pixel inputs, and runs the
    per-model shape/loss checks used by the test class below.

    NOTE(review): the generated ``__init__`` signature repeats the parameter
    name ``UpperCamelCase__`` (a SyntaxError) and the bodies read the
    pre-generation names (``parent``, ``batch_size``, ``config``, ...); the
    code is kept byte-identical here and needs those names restored to run.
    '''

    def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=[2, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[8, 4, 2, 1] , UpperCamelCase__ : Tuple=[16, 32, 64, 128] , UpperCamelCase__ : Optional[int]=[1, 4, 8, 16] , UpperCamelCase__ : Any=[1, 2, 4, 8] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=None , ):
        # Store the harness handle and all model hyper-parameters for reuse.
        A__ : Tuple =parent
        A__ : List[Any] =batch_size
        A__ : List[Any] =image_size
        A__ : Union[str, Any] =num_channels
        A__ : Optional[int] =num_encoder_blocks
        A__ : Any =sr_ratios
        A__ : Any =depths
        A__ : List[Any] =hidden_sizes
        A__ : List[Any] =downsampling_rates
        A__ : List[str] =num_attention_heads
        A__ : int =is_training
        A__ : List[Any] =use_labels
        A__ : Any =hidden_act
        A__ : Dict =hidden_dropout_prob
        A__ : int =attention_probs_dropout_prob
        A__ : List[Any] =initializer_range
        A__ : Tuple =num_labels
        A__ : List[Any] =scope

    def _UpperCAmelCase ( self : Optional[int] ):
        '''Return (config, random pixel_values, optional per-pixel labels).'''
        A__ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ : Any =None
        if self.use_labels:
            A__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        A__ : List[Any] =self.get_config()
        return config, pixel_values, labels

    def _UpperCAmelCase ( self : Tuple ):
        '''Build a small SegformerConfig from the stored hyper-parameters.'''
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
        '''Base model: last hidden state must match the final-stage shape.'''
        A__ : Any =SegformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : Dict =model(UpperCamelCase__ )
        # Final feature map side = image_size / (last downsampling rate * 2).
        A__ : Optional[int] =self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
        '''Semantic segmentation head: logits shape and positive loss.'''
        A__ : str =self.num_labels
        A__ : Optional[Any] =SegformerForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : Optional[Any] =model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        A__ : List[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
        '''Binary (single-label) segmentation: loss must be positive.'''
        A__ : Tuple =1
        A__ : Tuple =SegformerForSemanticSegmentation(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : List[str] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase__ )
        A__ : Dict =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertGreater(result.loss , 0.0 )

    def _UpperCAmelCase ( self : str ):
        '''Repackage prepare_config_and_inputs() as (config, inputs_dict).'''
        A__ : Union[str, Any] =self.prepare_config_and_inputs()
        A__ , A__ , A__ : Tuple =config_and_inputs
        A__ : Tuple ={"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    '''Common-suite tests for Segformer (model, semantic segmentation and
    image classification heads) plus attention/hidden-state shape checks.

    NOTE(review): many generated argument slots read ``UpperCamelCase__``
    where the pre-generation code passed real locals (configs, bools, model
    names); the code is kept byte-identical here and needs those names
    restored to run.
    '''

    # Model classes exercised by the shared ModelTesterMixin machinery.
    __magic_name__ : Dict = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    __magic_name__ : Optional[int] = (
        {
            """feature-extraction""": SegformerModel,
            """image-classification""": SegformerForImageClassification,
            """image-segmentation""": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Mixin feature flags (fp16 allowed; no resizing/head-masking/embeddings tests).
    __magic_name__ : Dict = True
    __magic_name__ : List[str] = False
    __magic_name__ : Optional[Any] = False
    __magic_name__ : str = False

    def _UpperCAmelCase ( self : Union[str, Any] ):
        '''Wire up the model tester and the config tester.'''
        A__ : Union[str, Any] =SegformerModelTester(self )
        A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ )

    def _UpperCAmelCase ( self : str ):
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self : Dict ):
        A__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _UpperCAmelCase ( self : Tuple ):
        A__ : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ )

    @unittest.skip("SegFormer does not use inputs_embeds" )
    def _UpperCAmelCase ( self : Dict ):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def _UpperCAmelCase ( self : Tuple ):
        pass

    def _UpperCAmelCase ( self : List[str] ):
        '''forward() signatures must start with the pixel_values argument.'''
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : int =model_class(UpperCamelCase__ )
            A__ : Optional[int] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : Optional[int] =[*signature.parameters.keys()]
            A__ : List[str] =["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def _UpperCAmelCase ( self : str ):
        '''Attention outputs: count and (reduced) sequence-length shapes for
        the first and last encoder blocks, via both kwarg and config paths.'''
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Union[str, Any] =True
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            A__ : Union[str, Any] =False
            A__ : str =True
            A__ : Optional[int] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Any =outputs.attentions

            # One attention tensor per transformer layer across all blocks.
            A__ : List[str] =sum(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A__ : Dict =True
            A__ : str =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Union[str, Any] =outputs.attentions

            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )

            # verify the first attentions (first block, first layer)
            A__ : List[Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )

            # verify the last attentions (last block, last layer)
            A__ : Tuple =(self.model_tester.image_size // 32) ** 2
            A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            A__ : int =len(UpperCamelCase__ )

            # Check attention is always last and order is fine
            A__ : Optional[Any] =True
            A__ : Any =True
            A__ : Union[str, Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )

            self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )

            A__ : Optional[Any] =outputs.attentions

            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )

            # verify the first attentions (first block, first layer)
            A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )

    def _UpperCAmelCase ( self : List[Any] ):
        '''Hidden states: one per encoder block, first one at 1/4 resolution.'''
        def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
            A__ : Optional[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()

            with torch.no_grad():
                A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )

            A__ : Optional[Any] =outputs.hidden_states

            A__ : int =self.model_tester.num_encoder_blocks
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ : str =True

            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[int] ):
        '''Training smoke test: loss backpropagates for trainable heads.'''
        if not self.model_tester.is_training:
            return

        A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : List[Any] =True

        for model_class in self.all_model_classes:
            # Skip bare backbones (no loss head) registered in MODEL_MAPPING.
            if model_class in get_values(UpperCamelCase__ ):
                continue

            A__ : List[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.train()
            A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _UpperCAmelCase ( self : Tuple ):
        pass

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        '''Hub checkpoints must load into a non-None model.'''
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def lowercase():
    """Load the COCO cats fixture image used by the slow integration tests."""
    # BUG FIX: the opened image was bound to a throwaway name while the
    # return read an undefined ``image``.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Slow integration tests against real Segformer checkpoints: logits must
    match frozen reference slices, and post-processing must resize correctly.

    NOTE(review): local names were mangled (bindings collapsed to ``A__``
    while later lines read ``model``, ``outputs``, ``expected_shape``,
    ``expected_slice``, ...); kept byte-identical here, needs those names
    restored to run.
    '''

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        '''ADE20k b0 checkpoint: logits slice vs frozen reference (atol 1e-4).'''
        # only resize + normalize
        A__ : List[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )

        A__ : Union[str, Any] =prepare_img()
        A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ )

        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )

        A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )

        A__ : Optional[int] =torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _UpperCAmelCase ( self : Union[str, Any] ):
        '''Cityscapes b1 checkpoint: logits slice vs frozen reference (atol 1e-1).'''
        # only resize + normalize
        A__ : Dict =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : int =SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(UpperCamelCase__ )

        A__ : Tuple =prepare_img()
        A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ )

        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )

        A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )

        A__ : List[Any] =torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) )

    @slow
    def _UpperCAmelCase ( self : int ):
        '''post_process_semantic_segmentation: honors target_sizes, defaults
        to the logits resolution otherwise.'''
        # only resize + normalize
        A__ : Optional[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )

        A__ : str =prepare_img()
        A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ )

        with torch.no_grad():
            A__ : Dict =model(UpperCamelCase__ )

        A__ : Any =outputs.logits.detach().cpu()

        A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
        A__ : List[str] =torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )

        A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        A__ : Tuple =torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this config module.
# NOTE(review): the annotation references ``Optional`` which is not imported
# in this chunk, and the next assignment rebinds the same generated name
# ``__A`` -- both look like generation artifacts; confirm intended names.
__A : Optional[int] = logging.get_logger(__name__)

# Checkpoint name -> remote config URL map.
# NOTE(review): annotated ``int`` but bound to a dict -- annotation is wrong.
__A : int = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __lowerCAmelCase ( _UpperCamelCase):
    """Model configuration for UniSpeech.

    Holds convolutional feature-extractor settings, transformer sizes,
    SpecAugment masking parameters, codevector-quantizer settings and
    CTC-loss options.

    NOTE(review): identifiers in this dump are machine-mangled (placeholder
    class/parameter names); the documentation below is based solely on the
    visible assignments.
    """

    # `model_type` registry key — presumably consumed by AutoConfig; verify upstream.
    __magic_name__ : Optional[int] = """unispeech"""

    def __init__( self : Any , UpperCamelCase__ : int=32 , UpperCamelCase__ : Dict=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : str="group" , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : Dict=False , UpperCamelCase__ : int=128 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[Any]=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Tuple=320 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=100 , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]=256 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : int="mean" , UpperCamelCase__ : str=False , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Any=256 , UpperCamelCase__ : List[Any]=80 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=0.5 , **UpperCamelCase__ : int , ):
        super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
        # --- transformer / feature-extractor architecture ---
        A__ : int =hidden_size
        A__ : str =feat_extract_norm
        A__ : Tuple =feat_extract_activation
        # conv_dim / conv_stride / conv_kernel describe the conv stack; lengths must match.
        A__ : Union[str, Any] =list(UpperCamelCase__ )
        A__ : str =list(UpperCamelCase__ )
        A__ : List[Any] =list(UpperCamelCase__ )
        A__ : Optional[int] =conv_bias
        A__ : str =num_conv_pos_embeddings
        A__ : List[Any] =num_conv_pos_embedding_groups
        A__ : str =len(self.conv_dim )
        A__ : Any =num_hidden_layers
        A__ : Tuple =intermediate_size
        A__ : Any =hidden_act
        A__ : Any =num_attention_heads
        # --- dropout / regularisation ---
        A__ : List[Any] =hidden_dropout
        A__ : Optional[int] =attention_dropout
        A__ : Dict =activation_dropout
        A__ : Optional[int] =feat_proj_dropout
        A__ : Optional[int] =final_dropout
        A__ : Optional[int] =layerdrop
        A__ : List[str] =layer_norm_eps
        A__ : Optional[int] =initializer_range
        A__ : int =num_ctc_classes
        A__ : List[str] =vocab_size
        A__ : Optional[Any] =do_stable_layer_norm
        A__ : List[Any] =use_weighted_layer_sum
        A__ : Union[str, Any] =classifier_proj_size

        # Validate that the three conv-layer descriptions agree in length.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        A__ : List[Any] =apply_spec_augment
        A__ : Tuple =mask_time_prob
        A__ : List[str] =mask_time_length
        A__ : Any =mask_time_min_masks
        A__ : str =mask_feature_prob
        A__ : Optional[int] =mask_feature_length
        A__ : int =mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        A__ : List[str] =num_codevectors_per_group
        A__ : Union[str, Any] =num_codevector_groups
        A__ : Optional[int] =contrastive_logits_temperature
        A__ : Dict =feat_quantizer_dropout
        A__ : Tuple =num_negatives
        A__ : Any =codevector_dim
        A__ : List[Any] =proj_codevector_dim
        A__ : Union[str, Any] =diversity_loss_weight

        # ctc loss
        A__ : Optional[Any] =ctc_loss_reduction
        A__ : List[Any] =ctc_zero_infinity

        # pretraining loss
        A__ : str =replace_prob

    @property
    def _UpperCAmelCase ( self : str ):
        # Total temporal downsampling factor of the conv feature extractor
        # (product of all conv strides).
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 656 | """simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase):
    """Test helper that builds small RobertaPreLayerNorm configs and random
    input batches for the Flax model tests below.

    NOTE(review): local identifiers are machine-mangled (`A__` placeholders);
    attribute names on the left-hand side are lost in this dump.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ):
        # Store the tester knobs (batch/sequence sizes, model dims, dropout, …).
        A__ : str =parent
        A__ : List[str] =batch_size
        A__ : Any =seq_length
        A__ : List[str] =is_training
        A__ : List[Any] =use_attention_mask
        A__ : List[Any] =use_token_type_ids
        A__ : Dict =use_labels
        A__ : List[Any] =vocab_size
        A__ : Optional[int] =hidden_size
        A__ : Optional[Any] =num_hidden_layers
        A__ : str =num_attention_heads
        A__ : int =intermediate_size
        A__ : Tuple =hidden_act
        A__ : Tuple =hidden_dropout_prob
        A__ : Dict =attention_probs_dropout_prob
        A__ : Any =max_position_embeddings
        A__ : Any =type_vocab_size
        A__ : Union[str, Any] =type_sequence_label_size
        A__ : Optional[Any] =initializer_range
        A__ : int =num_choices

    def _UpperCAmelCase ( self : Tuple ):
        """Build (config, input_ids, token_type_ids, attention_mask) with random contents."""
        A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : List[str] =None
        if self.use_attention_mask:
            A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
        A__ : str =None
        if self.use_token_type_ids:
            A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        A__ : Any =RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _UpperCAmelCase ( self : Tuple ):
        """Return (config, inputs_dict) in the shape the common Flax test mixin expects."""
        A__ : Dict =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : str =config_and_inputs
        A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def _UpperCAmelCase ( self : int ):
        """Variant for decoder (cross-attention) tests: also returns random
        encoder hidden states and an encoder attention mask."""
        A__ : str =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs
        A__ : Union[str, Any] =True
        A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    """Common-mixin model tests for all Flax RobertaPreLayerNorm head classes,
    plus a slow smoke test that loads the pretrained checkpoint."""

    __magic_name__ : Union[str, Any] = True

    # Every Flax head class under test; empty when Flax is unavailable.
    __magic_name__ : Dict = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _UpperCAmelCase ( self : Optional[int] ):
        # Attach the tester defined above; the mixin drives it.
        A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        """Smoke test: each head class loads the PT checkpoint and runs a 1x1 batch."""
        for model_class_name in self.all_model_classes:
            A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
            A__ : Union[str, Any] =model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase):
    """Integration tests pinning exact logits/hidden-state slices of the
    pretrained Flax RobertaPreLayerNorm checkpoint."""

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        """Masked-LM head: check output shape and a 3x3 logits slice."""
        A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : str =model(UpperCamelCase__ )[0]
        # [batch, seq_len, vocab_size]
        A__ : List[Any] =[1, 11, 50265]
        self.assertEqual(list(output.shape ) , UpperCamelCase__ )
        # compare the actual values for a slice.
        A__ : Any =np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        """Base model: check a 3x3 slice of the last hidden state."""
        A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : Dict =model(UpperCamelCase__ )[0]
        # compare the actual values for a slice.
        A__ : Optional[Any] =np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 656 | 1 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)

# (old_prefix, new_prefix) substring pairs applied to checkpoint keys during
# conversion. NOTE(review): the list is bound to a placeholder name (`__A`)
# but is read elsewhere as `rename_keys_prefix` — names look machine-mangled.
__A : Any = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# File names of the original VisualBERT checkpoints this script accepts.
__A : Optional[int] = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def lowercase ( UpperCamelCase : Tuple ):
    """Load a torch checkpoint from the given path onto the CPU.

    `map_location="cpu"` keeps GPU-trained checkpoints loadable on
    CPU-only hosts.
    """
    return torch.load(UpperCamelCase , map_location="cpu" )
# Default (old_prefix, new_prefix) rename rules applied to every checkpoint key.
_RENAME_KEYS_PREFIX = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]


def get_new_dict(d, config, rename_keys_prefix=None):
    """Rewrite a raw VisualBERT state dict into transformers naming.

    Args:
        d: source state dict (checkpoint key -> tensor).
        config: VisualBert config; only `max_position_embeddings` is read.
        rename_keys_prefix: optional list of (old, new) substring pairs;
            defaults to the module-level rename table.

    Returns:
        OrderedDict with detector weights dropped, keys renamed, position_ids
        regenerated, and the missing `decoder.bias` filled in.
    """
    if rename_keys_prefix is None:
        rename_keys_prefix = _RENAME_KEYS_PREFIX
    new_d = OrderedDict()
    # Old checkpoints do not store position_ids; recreate the buffer.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : List[str] ):
    """Convert an original VisualBERT `.th` checkpoint into a transformers model
    and save it under the given dump folder.

    NOTE(review): identifiers are machine-mangled — both parameters are
    literally named `UpperCamelCase` (a SyntaxError as written) while the
    body reads `checkpoint_path` and `pytorch_dump_folder_path`; those are
    evidently the intended names. Locals are likewise bound to `A__`.
    """
    # The checkpoint file name selects the model architecture and config.
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''

    # Get Config
    if "pre" in checkpoint_path:
        # Pretraining checkpoints: pick the visual embedding dim per task family.
        A__ : Any ="pretraining"
        if "vcr" in checkpoint_path:
            A__ : Union[str, Any] ={"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            A__ : Optional[Any] ={"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            A__ : Optional[int] ={"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            A__ : List[str] ={"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
    else:
        # Fine-tuned checkpoints: also record the downstream head / label count.
        if "vcr" in checkpoint_path:
            A__ : Optional[int] ={"visual_embedding_dim": 512}
            A__ : List[str] ="multichoice"
        elif "vqa_advanced" in checkpoint_path:
            A__ : Any ={"visual_embedding_dim": 2048}
            A__ : str ="vqa_advanced"
        elif "vqa" in checkpoint_path:
            A__ : Optional[int] ={"visual_embedding_dim": 2048, "num_labels": 3129}
            A__ : str ="vqa"
        elif "nlvr" in checkpoint_path:
            A__ : str ={
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            A__ : Dict ="nlvr"

    A__ : Union[str, Any] =VisualBertConfig(**UpperCamelCase )

    # Load State Dict
    A__ : int =load_state_dict(UpperCamelCase )
    A__ : Tuple =get_new_dict(UpperCamelCase , UpperCamelCase )

    # Instantiate the head class matching the detected model type.
    if model_type == "pretraining":
        A__ : str =VisualBertForPreTraining(UpperCamelCase )
    elif model_type == "vqa":
        A__ : Optional[int] =VisualBertForQuestionAnswering(UpperCamelCase )
    elif model_type == "nlvr":
        A__ : Union[str, Any] =VisualBertForVisualReasoning(UpperCamelCase )
    elif model_type == "multichoice":
        A__ : Union[str, Any] =VisualBertForMultipleChoice(UpperCamelCase )

    model.load_state_dict(UpperCamelCase )
    # Save Checkpoints
    Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
    model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    # CLI: positional input checkpoint and output folder.
    __A : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    __A : str = parser.parse_args()
    # NOTE(review): `parser`, `args` and `convert_visual_bert_checkpoint` are read
    # here but the assignments above bind placeholder names (`__A`) — the dump's
    # identifiers are machine-mangled.
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# 1 for manhattan, 0 for euclidean
# NOTE(review): these globals are bound to `__A` placeholders but are read
# elsewhere as HEURISTIC / grid / delta / TPosition — names look mangled.
__A : int = 0

__A : List[Any] = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

# Neighbour offsets in (dy, dx) order.
__A : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A grid coordinate expressed as (y, x).
__A : List[str] = tuple[int, int]
class Node:
    """A search node on the grid: position, goal, path cost g and heuristic h.

    Ordering (`__lt__`) is by total cost f = g + h so open lists can be sorted.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are stored (y, x) throughout
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        # Fix: __init__ invoked `self.calculate_heuristic()` while the method
        # carried a mangled placeholder name, which raised AttributeError.
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance to the goal: Manhattan when HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Sort order for open lists: lowest total cost first.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level `grid`, expanding neighbours from `delta`.

    Fixes relative to the dumped version: the methods called internally as
    `search`/`get_successors`/`retrace_path` carried mangled placeholder names,
    and the open-list deduplication branch re-added the inferior new node
    instead of restoring the cheaper node already on the open list.
    """

    def __init__(self, start: TPosition, goal: TPosition):
        # Positions arrive as (y, x); Node takes (pos_x, pos_y, goal_x, goal_y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the (y, x) path to the target, or [start] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (lowest f-cost first)
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        # keep the cheaper node that was already queued
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Expand the in-bounds, non-obstacle neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back to the start; return the path in start→node order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Two simultaneous A* searches (start→goal and goal→start) that stop when
    their frontiers meet.

    Fixes relative to the dumped version: internal method names were mangled
    (breaking `self.get_successors` / `self.retrace_bidirectional_path` calls),
    the per-iteration retargeting of each search was discarded into throwaway
    locals, and the open-list dedup branch dropped the cheaper queued node.
    """

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Advance both searches in lockstep; return the joined path, or
        [start] when no path exists."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each search aims at the other's current best node so the
            # heuristics pull the frontiers toward one another.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting point
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Fixes relative to the dump: the timing results referenced `start_time` /
    # `bd_start_time` that were never bound (assignments went to a placeholder
    # name), and the bidirectional search was constructed but never run.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"""AStar execution time = {end_time:f} seconds""")

    bd_start_time = time.time()
    bidir_a_star = BidirectionalAStar(init, goal)
    bd_path = bidir_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 656 | """simple docstring"""
# Fifth power of each decimal digit, keyed by the digit character.
# Fixes relative to the dump: the table was bound to a placeholder name while
# being read as DIGITS_FIFTH_POWER, the filter predicate passed an undefined
# name instead of the current number, and __main__ called an undefined
# `solution`.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum of all numbers equal to the sum of the fifth
    powers of their digits (single-digit numbers are excluded as trivial).

    Upper bound: 7 * 9**5 < 1_000_000, so no larger number can qualify.
    """
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 656 | 1 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Build a BigBird model from a JSON config, load TF checkpoint weights
    into it, and save the result as a PyTorch checkpoint.

    Fixes relative to the dump: the four parameters all shared one mangled
    name (a SyntaxError), and the function name did not match the
    `convert_tf_checkpoint_to_pytorch` call in the __main__ block.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        big_bird_config_file: JSON config describing the architecture.
        pytorch_dump_path: output directory for the converted model.
        is_trivia_qa: when True build the QuestionAnswering head,
            otherwise the PreTraining head.
    """
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # CLI for the conversion: TF checkpoint + JSON config in, PyTorch model out.
    __A : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    # NOTE(review): `parser` and `args` are read below but the assignments above
    # bind placeholder names (`__A`) — identifiers in this dump are mangled.
    __A : Any = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 656 | """simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__A : Optional[Any] = logging.get_logger(__name__)

# General docstring
__A : str = "PoolFormerConfig"

# Base docstring
__A : Optional[Any] = "sail/poolformer_s12"
# Expected [batch, channels, height, width] of the doc example's output features.
__A : List[Any] = [1, 512, 7, 7]

# Image classification docstring
__A : List[str] = "sail/poolformer_s12"
__A : Tuple = "tabby, tabby cat"

# Checkpoints with a public pretrained archive.
__A : Tuple = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, for use in residual blocks.

    Fixes relative to the dump: the three parameters shared one mangled name
    (a SyntaxError), `torch.rand` was handed the input tensor instead of the
    broadcast shape, `input.div` was handed the input instead of `keep_prob`,
    and the name is restored so `PoolFormerDropPath.forward`'s `drop_path(...)`
    call resolves.

    Args:
        input: tensor of any rank; the first dimension is the batch.
        drop_prob: probability of zeroing each sample's path.
        training: apply dropping only in training mode.

    Returns:
        `input` unchanged when inactive; otherwise each sample is either
        zeroed or rescaled by 1/keep_prob to preserve the expectation.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all non-batch dims.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Per-sample drop-path (stochastic depth) module wrapping `drop_path`.

    Fix relative to the dump: both methods carried the same mangled placeholder
    name, so nn.Module could not dispatch to `forward` and `extra_repr` was
    never picked up.
    """

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Active only in training mode (self.training), matching drop_path().
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Patch embeddings: a strided Conv2d downsampling of the feature map,
    optionally followed by a normalization layer.

    Fixes relative to the dump: `nn.Convad` is a NameError (`nn.Conv2d`
    intended), all six parameters shared one mangled name (a SyntaxError),
    and the forward method's name was mangled, breaking nn.Module dispatch.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        # Accept either an int or an iterable (height, width) for each geometry arg.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        # Identity when no norm layer is supplied.
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """GroupNorm pinned to a single group, i.e. layer normalization across all
    channels of a conv feature map.

    Fix relative to the dump: the positional and `**` parameters shared one
    mangled name, which is a SyntaxError.
    """

    def __init__(self, num_channels, **kwargs):
        # num_groups is fixed to 1; everything else passes through to GroupNorm.
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """PoolFormer token mixer: average pooling minus the identity (the residual
    connection is added back by the caller).

    Fixes relative to the dump: `nn.AvgPoolad` is a NameError (`nn.AvgPool2d`
    intended), the body read `pool_size` while the parameter was mangled, the
    `count_include_pad` value was lost, and the forward method name was
    mangled, breaking nn.Module dispatch.
    """

    def __init__(self, pool_size):
        super().__init__()
        # stride 1 + symmetric padding keeps the spatial size unchanged;
        # count_include_pad=False so border averages ignore the zero padding.
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """Channel MLP applied after pooling:
    1x1 conv -> activation -> dropout -> 1x1 conv -> dropout.

    Fixes relative to the dump: both convolutions were assigned to the same
    attribute (so the first was silently discarded), `nn.Convad` is a
    NameError (`nn.Conv2d` intended), the `isinstance` type argument was
    mangled (should be `str`), and the parameter/method names were mangled.
    """

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # config.hidden_act may be a string key into ACTaFN or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """One PoolFormer block: GroupNorm -> pooling mixer -> residual, then
    GroupNorm -> channel MLP -> residual, each residual optionally scaled by a
    learned per-channel layer-scale parameter and passed through drop-path.

    Fixes relative to the dump: the two layer-scale parameters collapsed onto
    a single attribute (so the second silently replaced the first and the MLP
    branch reused the pooling branch's scale), and the parameter/attribute/
    method names were mangled, breaking nn.Module `forward` dispatch and the
    `self.before_norm` / `self.after_norm` / `self.output` reads.
    """

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # One learned scale per channel for each of the two residual branches.
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class __lowerCAmelCase ( nn.Module):
    """PoolFormer encoder: a sequence of patch-embedding (downsampling) stages,
    each followed by a list of PoolFormer blocks, with a stochastic-depth rate
    that grows linearly over the total depth."""
    def __init__( self : Dict , UpperCamelCase__ : List[str] ):
        super().__init__()
        A__ : Tuple =config
        # stochastic depth decay rule: one drop rate per block, linearly
        # spaced from 0 to config.drop_path_rate over all blocks.
        A__ : Dict =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings: one per stage; the first stage consumes the raw
        # image channels, later stages consume the previous hidden size.
        A__ : Tuple =[]
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        A__ : List[str] =nn.ModuleList(UpperCamelCase__ )
        # Transformer blocks
        A__ : Union[str, Any] =[]
        # ``cur`` is the running offset into the flat drop-path-rate list.
        A__ : Any =0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            A__ : Union[str, Any] =[]
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(UpperCamelCase__ ) )
        A__ : str =nn.ModuleList(UpperCamelCase__ )
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True ):
        """Embed the pixel values stage by stage and run each stage's blocks.
        Returns a ``BaseModelOutputWithNoAttention`` (or a plain tuple when
        ``return_dict`` is falsy), optionally collecting per-stage hidden
        states."""
        A__ : Union[str, Any] =() if output_hidden_states else None
        A__ : Dict =pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            A__ , A__ : List[Any] =layers
            # Get patch embeddings from hidden_states
            A__ : Any =embedding_layer(UpperCamelCase__ )
            # Send the embeddings through the blocks
            for _, blk in enumerate(UpperCamelCase__ ):
                A__ : List[str] =blk(UpperCamelCase__ )
                A__ : Tuple =layer_outputs[0]
            if output_hidden_states:
                A__ : List[Any] =all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ )
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : List[str] = PoolFormerConfig
__magic_name__ : int = """poolformer"""
__magic_name__ : Any = """pixel_values"""
__magic_name__ : Any = True
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : str ):
if isinstance(UpperCamelCase__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=False ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] =value
__A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    """Headless PoolFormer model: wraps the encoder and returns its raw
    hidden states."""
    def __init__( self : List[str] , UpperCamelCase__ : Dict ):
        super().__init__(UpperCamelCase__ )
        A__ : List[Any] =config
        A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ )
        # Initialize weights and apply final processing
        self.post_init()
    def _UpperCAmelCase ( self : Tuple ):
        # Accessor for the first-stage patch embeddings.
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        """Run the encoder over ``pixel_values``; raises ``ValueError`` when
        they are missing. Returns a ``BaseModelOutputWithNoAttention`` or,
        when ``return_dict`` is falsy, a tuple."""
        # Fall back to config defaults for unspecified flags.
        A__ : int =(
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        A__ : List[Any] =self.encoder(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : int =encoder_outputs[0]
        if not return_dict:
            # Keep the tuple shape stable: (last_hidden_state, None, *rest).
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , )
class __lowerCAmelCase ( nn.Module):
    """A single square dense projection over the final hidden state."""
    def __init__( self : Dict , UpperCamelCase__ : Optional[Any] ):
        super().__init__()
        # hidden_size -> hidden_size projection.
        A__ : List[str] =nn.Linear(config.hidden_size , config.hidden_size )
    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
        """Project the hidden state through the dense layer and return it."""
        return self.dense(UpperCamelCase__ )
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    """PoolFormer with a group-norm + global-average-pool + linear
    classification head. Supports regression, single-label, and multi-label
    problems via ``config.problem_type``."""
    def __init__( self : Optional[Any] , UpperCamelCase__ : str ):
        super().__init__(UpperCamelCase__ )
        A__ : List[str] =config.num_labels
        A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ )
        # Final norm
        A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head; identity when there are no labels to predict.
        A__ : Dict =(
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[bool] = None , ):
        """Classify ``pixel_values``; when ``labels`` are given, also compute
        the loss appropriate for the (possibly inferred) problem type."""
        A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict
        A__ : List[str] =self.poolformer(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : str =outputs[0]
        # Norm, then global average pool over the two spatial dims, then
        # project to logits.
        A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) )
        A__ : Optional[Any] =None
        if labels is not None:
            # Infer the problem type once, from num_labels and label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    A__ : int ="regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    A__ : Tuple ="single_label_classification"
                else:
                    A__ : Optional[int] ="multi_label_classification"
            if self.config.problem_type == "regression":
                A__ : Dict =MSELoss()
                if self.num_labels == 1:
                    A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
            elif self.config.problem_type == "single_label_classification":
                A__ : Tuple =CrossEntropyLoss()
                A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                A__ : List[Any] =BCEWithLogitsLoss()
                A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
        if not return_dict:
            # Tuple output: (loss?, logits, *hidden_states).
            A__ : Optional[int] =(logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
| 656 | 1 |
"""simple docstring"""
def lowercase ( UpperCamelCase : int = 2000000 ):
    """Return the sum of all primes strictly below ``UpperCamelCase``.

    Project Euler problem 10, solved with a sieve of Eratosthenes.

    BUG FIX: the previous version never wrote its composite marks back into
    the sieve (the assignments were lost during obfuscation), so every entry
    stayed unmarked and the function summed *all* integers below the limit.

    >>> lowercase(10)
    17
    """
    n = UpperCamelCase
    # is_composite[i] becomes True once i is known to be non-prime.
    is_composite = [False] * (n + 1)
    if n > 0:
        is_composite[0] = True
    if n > 1:
        is_composite[1] = True
    for i in range(2, int(n**0.5) + 1):
        if not is_composite[i]:
            # Mark every multiple of i starting at i*i (smaller multiples
            # were already marked by smaller primes).
            for j in range(i * i, n + 1, i):
                is_composite[j] = True
    # Sum every index below n that was never marked composite.
    return sum(i for i in range(n) if not is_composite[i])


if __name__ == "__main__":
    # BUG FIX: the guard previously called an undefined name ``solution``.
    print(f"{lowercase() = }")
| 656 | """simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    """Fast tests for ``IFInpaintingSuperResolutionPipeline`` using tiny dummy
    components; skipped on MPS."""
    __magic_name__ : int = IFInpaintingSuperResolutionPipeline
    __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Dummy super-resolution components supplied by IFPipelineTesterMixin.
        return self._get_superresolution_dummy_components()
    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ):
        """Build deterministic pipeline inputs (16x16 image, 32x32 original and
        mask) for the given device and seed."""
        # MPS generators must live on CPU; see torch.Generator device rules.
        if str(UpperCamelCase__ ).startswith("mps" ):
            A__ : Any =torch.manual_seed(UpperCamelCase__ )
        else:
            A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : List[str] ={
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _UpperCAmelCase ( self : Dict ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def _UpperCAmelCase ( self : int ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def _UpperCAmelCase ( self : Tuple ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )
    def _UpperCAmelCase ( self : str ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def _UpperCAmelCase ( self : Dict ):
        self._test_save_load_local()
    def _UpperCAmelCase ( self : Optional[int] ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations

# Project Euler problem 35: circular primes below one million.
#
# BUG FIX: the obfuscated version lost the sieve writes (composites were
# never marked) and defined every helper under the same name ``lowercase``
# while the bodies called ``is_prime`` / ``contains_an_even_digit`` /
# ``find_circular_primes`` — names that did not exist. The helpers are
# restored under the names the code actually calls.

# Sieve of Eratosthenes over [0, 1_000_000]. Indices 0 and 1 are never
# queried by the callers below, which start from 2 and 3.
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Return True if ``n`` (2 <= n <= 1_000_000) survived the sieve."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if the decimal representation of ``n`` has an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes not exceeding ``limit``.

    A circular prime stays prime under every rotation of its digits. Any
    candidate above 2 containing an even digit is skipped outright, since
    some rotation would end in that digit and hence be even.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[k:] + str_num[:k]) for k in range(len(str_num))]
            if all(is_prime(rotated) for rotated in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Return the count of circular primes below one million."""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 656 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Mapping of submodule name -> public names: consumed by ``_LazyModule`` so
# heavy framework-specific submodules are imported only on first access.
# NOTE(review): obfuscation collapsed ``_import_structure[...] = [...]``
# assignments into bare ``__A = [...]`` below — confirm against upstream.
__A : Any = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
# Image processor is only registered when the vision extra (PIL) is present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Union[str, Any] = ["EfficientFormerImageProcessor"]
# PyTorch model classes, registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Optional[int] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
# TensorFlow model classes, registered only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Optional[int] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
# Under static type checking import everything eagerly so annotations
# resolve; at runtime install the lazy proxy instead.
if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): upstream assigns this proxy to ``sys.modules[__name__]``;
    # the obfuscated target ``__A`` is kept byte-identical here.
    __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny random checkpoints used as teachers in the layer-copy tests.
__A : Optional[int] = "sshleifer/bart-tiny-random"
__A : str = "patrickvonplaten/t5-tiny-random"
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Tests for ``create_student_by_copying_alternating_layers``: the student
    must end up with the requested encoder/decoder layer counts."""
    @cached_property
    def _UpperCAmelCase ( self : Any ):
        # Teacher config, loaded once per test-case instance.
        return AutoConfig.from_pretrained(UpperCamelCase__ )
    def _UpperCAmelCase ( self : List[str] ):
        # T5-style student: a single hidden layer on each side.
        A__ , *A__ : str =create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def _UpperCAmelCase ( self : int ):
        # Smoke test: creation succeeds with an unspecified decoder depth.
        A__ , *A__ : int =create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase__ )
    def _UpperCAmelCase ( self : int ):
        # With d unset, the student keeps the teacher's decoder depth.
        A__ , *A__ : Optional[int] =create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase__ )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def _UpperCAmelCase ( self : str ):
        # Both sides reduced to a single layer.
        A__ , *A__ : int =create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def _UpperCAmelCase ( self : Dict ):
        # Requesting no layers at all must raise.
        with self.assertRaises(UpperCamelCase__ ):
            create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=UpperCamelCase__ , d=UpperCamelCase__ )
| 656 | """simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase ( scheduler , num_steps=10 ):
    """Record the first learning rate reported by ``scheduler`` for
    ``num_steps`` steps, advancing the scheduler after each read.

    BUG FIX: the obfuscated signature declared the same parameter name twice
    (a SyntaxError) while the body referenced the unbound names
    ``scheduler`` and ``lrs``; the parameters are restored to the names the
    body actually uses.
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def lowercase ( scheduler , num_steps=10 ):
    """Record learning rates like the plain unwrap helper, but halfway through
    save the scheduler state to disk and reload it, exercising
    ``state_dict`` round-tripping.

    BUG FIX: the obfuscated signature declared the same parameter name twice
    (a SyntaxError) while the body referenced unbound names; parameters are
    restored to the names the body actually uses.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            # Round-trip the scheduler state through a temporary file.
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Smoke tests for the optimizers: regress a single 3-vector weight onto a
    fixed target with full-batch MSE and check it lands within 1e-2."""
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
        # Element-wise almost-equal check over two equal-length sequences.
        self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
        for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ )
    def _UpperCAmelCase ( self : Tuple ):
        # AdamW: 100 steps at lr 0.2 should reach the target.
        A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
        A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] )
        A__ : Any =nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            A__ : Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    def _UpperCAmelCase ( self : Dict ):
        # Adafactor: same regression, 1000 steps with a fixed (non-relative) lr.
        A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
        A__ : Dict =torch.tensor([0.4, 0.2, -0.5] )
        A__ : Optional[int] =nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__ : int =Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , )
        for _ in range(1000 ):
            A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Tests every LR-schedule factory: expected LR trajectories over ten
    steps, plus save/reload of the scheduler state."""
    # Shared fixtures: a dummy model, its optimizer, and the step horizon.
    __magic_name__ : Optional[int] = nn.Linear(50 , 50) if is_torch_available() else None
    __magic_name__ : Any = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None
    __magic_name__ : Union[str, Any] = 10
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ):
        # Element-wise almost-equal check with an optional failure message.
        self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
        for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ )
    def _UpperCAmelCase ( self : Optional[Any] ):
        A__ : Union[str, Any] ={"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        A__ : Union[str, Any] ={
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            A__ , A__ : Any =data
            # The observed LR trajectory must match the expected one.
            A__ : Union[str, Any] =scheduler_func(self.optimizer , **UpperCamelCase__ )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            A__ : int =unwrap_schedule(UpperCamelCase__ , self.num_steps )
            self.assertListAlmostEqual(
                UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
            # A fresh scheduler must survive a state save/reload mid-run.
            A__ : List[str] =scheduler_func(self.optimizer , **UpperCamelCase__ )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ ) # wrap to test picklability of the schedule
            A__ : Tuple =unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps )
            self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' )
class __lowerCAmelCase :
    """Picklable callable wrapper around an LR lambda, so LambdaLR schedules
    built from closures can be saved with ``torch.save``."""
    def __init__( self : int , UpperCamelCase__ : str ):
        # The wrapped lambda/function.
        A__ : int =fn
    def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ):
        # Delegate straight to the wrapped function.
        return self.fn(*UpperCamelCase__ , **UpperCamelCase__ )
    @classmethod
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict ):
        """Wrap each of ``scheduler.lr_lambdas`` in this class.

        NOTE(review): the obfuscated body binds the wrapped list to a throwaway
        name; upstream presumably assigns it back to ``scheduler.lr_lambdas``
        — confirm against upstream.
        """
        A__ : str =list(map(self , scheduler.lr_lambdas ) )
| 656 | 1 |
"""simple docstring"""
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid, or its derivative when ``deriv`` is True.

    When ``deriv`` is True, ``value`` must already be a sigmoid *output*,
    since the derivative is computed as ``value * (1 - value)``.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value (fixed input to the single neuron, and the learning rate).
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so a one-neuron network outputs ``expected``.

    Returns the network's final output scaled to [0, 100]; with enough
    propagations it converges towards ``expected``.

    BUG FIX: the obfuscated version declared duplicate parameter names (a
    SyntaxError), never bound ``INITIAL_VALUE``, and the ``__main__`` guard
    called undefined names; the names the bodies actually use are restored.
    """
    # Random odd starting weight in [1, 199].
    weight = float(2 * (random.randint(1, 100)) - 1)
    layer_a = 0.0
    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta: scale the miss by the sigmoid slope at the output.
        layer_1_delta = layer_1_error * sigmoid_function(layer_a, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 656 | """simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger("transformers.models.speecht5")
__A : Optional[Any] = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A : Optional[int] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A : List[str] = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A : List[Any] = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A : Union[str, Any] = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A : Any = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A : Union[str, Any] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A : Optional[int] = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A : Optional[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Optional[int] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
# Keys promoted to the top level of the HF model (none at the moment).
__A : int = []
# Fairseq checkpoint keys with no counterpart in the HF architecture.
__A : int = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
# Per-task ignore lists: each task additionally drops the prenets/postnets
# belonging to the modalities that task does not use.
__A : Optional[Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
__A : Tuple = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
__A : Union[str, Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def lowercase ( UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : int ):
    """Walk ``key`` (dot-separated attribute path) into the HF model, check the
    destination's shape against ``value``, and copy the tensor into the
    attribute selected by ``weight_type`` (or the object itself when None).

    NOTE(review): obfuscation gave all five parameters the same name (a
    SyntaxError as written) — the body references ``key``, ``hf_pointer``,
    ``value``, ``weight_type`` and ``full_name``, so the real signature uses
    those names; confirm order against the upstream conversion script.
    """
    # Resolve the dotted path to the target submodule/parameter.
    for attribute in key.split("." ):
        A__ : Dict =getattr(UpperCamelCase , UpperCamelCase )
    # Shape check before copying, so mismatches fail loudly with context.
    if weight_type is not None:
        A__ : Union[str, Any] =getattr(UpperCamelCase , UpperCamelCase ).shape
    else:
        A__ : Tuple =hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    # Dispatch on the kind of tensor being copied (weight-norm splits a
    # conv weight into g/v; batch norms carry running statistics).
    if weight_type == "weight":
        A__ : Any =value
    elif weight_type == "weight_g":
        A__ : Any =value
    elif weight_type == "weight_v":
        A__ : Any =value
    elif weight_type == "bias":
        A__ : Tuple =value
    elif weight_type == "running_mean":
        A__ : Dict =value
    elif weight_type == "running_var":
        A__ : List[str] =value
    elif weight_type == "num_batches_tracked":
        A__ : Dict =value
    else:
        A__ : Optional[int] =value
    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowercase ( name , ignore_keys ):
    """Return True when checkpoint key ``name`` matches any pattern in
    ``ignore_keys``.

    Three pattern forms are supported:
      * ``"prefix.*"``  — matches when ``name`` starts with ``"prefix."``
      * ``"a.*.b"``     — matches when ``name`` contains both ``a`` and ``b``
      * plain string    — matches when the key occurs anywhere in ``name``

    BUG FIX: the obfuscated signature declared the same parameter name twice
    (a SyntaxError) while the body referenced the unbound names ``name`` and
    ``ignore_keys``; the parameters are restored to the names the body uses.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            # key[:-1] keeps the trailing dot, so only true children match.
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Load every tensor of a fairseq SpeechT5 state dict into ``hf_model``.

    Args:
        fairseq_dict: the fairseq checkpoint's ``"model"`` state dict.
        hf_model: target HF SpeechTa* model.
        task: one of ``"s2t"``, ``"t2s"``, ``"s2s"``; selects the key mapping,
            the ignore list, and whether the conv feature encoder is present.

    Raises:
        ValueError: for an unsupported ``task``.
    """
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the layer index extracted from the fairseq key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Route one fairseq conv-feature-encoder tensor into the HF feature extractor.

    ``full_name`` looks like ``"...conv_layers.<layer_id>.<type_id>.<param>"``:
    type_id 0 is the conv itself, type_id 2 a layer norm (only loaded when the
    normalization setup matches). Anything else goes into ``unused_weights``.

    Raises:
        ValueError: if a checkpoint shape does not match the HF parameter.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Convert a fairseq SpeechT5 checkpoint to HF format, save it, optionally push it.

    Args:
        task: ``"s2t"``, ``"t2s"`` or ``"s2s"`` — selects the target model class.
        checkpoint_path: path to the fairseq checkpoint file.
        pytorch_dump_folder_path: output folder for the converted model + processor.
        config_path: optional path to an HF config.json; defaults to a fresh config.
        vocab_path: optional SentencePiece model used to build the tokenizer.
        repo_id: optional hub repo to push the converted artifacts to.

    Raises:
        ValueError: for an unknown ``task``.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: the parser/args must be bound to the names used below.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 656 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (old prefix, new prefix) substitutions applied to every checkpoint key.
# get_new_dict() below uses this name as its default argument.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# Checkpoint file names this converter knows how to map onto a VisualBERT config;
# convert_visual_bert_checkpoint() asserts membership in this list.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a raw VisualBERT checkpoint's state dict onto CPU and return it."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Translate an original VisualBERT state dict ``d`` into HF naming.

    Drops detector weights, applies the prefix renames, and recreates the
    position_ids / decoder.bias entries the HF model expects.
    """
    new_d = OrderedDict()
    # The HF model keeps position_ids as a registered buffer; recreate it here.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert a known original VisualBERT checkpoint into the HF format.

    Args:
        checkpoint_path: path to one of the checkpoints listed in ACCEPTABLE_CHECKPOINTS;
            the file name encodes both the task and the visual embedding size.
        pytorch_dump_folder_path: output folder (created if missing) for the converted model.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: bind the parser/args to the names used below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 656 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
    """Caption decoder: a GPT-2 LM head driven by an (optionally projected) CLIP prefix.

    The prefix embedding is encoded into `prefix_hidden_dim` (when given), decoded
    back to the GPT-2 embedding size, and prepended to the token embeddings.
    """

    # Attention-mask buffers are rebuilt on load; don't warn when they are absent.
    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,  # GPT-2 config arguments from here on
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1E-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        # Without a hidden projection the prefix is fed straight to GPT-2, so its
        # width must match n_embd. (Message fixed: report prefix_inner_dim, which is
        # the value actually compared — prefix_hidden_dim is None on this path.)
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
                F""" `n_embd`: {n_embd} are not equal."""
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Project the prefix into (and back out of) the hidden space when requested.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        """Run the LM over [decoded prefix ; token embeddings].

        Returns the GPT-2 output, plus the encoded prefix when a hidden
        projection is configured.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # Pad the labels with zeros over the prefix positions.
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        """Zero label tokens covering the prefix positions (int64: token dtype)."""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Encode a raw prefix into the hidden space (identity when not configured)."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one caption per feature row via beam search; returns (tokens, lengths)."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Beam search over the LM; returns (beam token tensors, beam sequence lengths),
        both sorted by descending average log-probability."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: seed the beams from the top-k next tokens.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams contribute only their current score (token 0, logprob 0).
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 656 | 1 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
# Absolute tolerance for float comparisons in the slow integration tests below;
# binding it to the placeholder `__A` left it unreachable by name.
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    """Builds tiny Autoformer configs and random inputs for the unit tests below.

    The test case at the bottom of the file instantiates this as
    ``AutoformerModelTester(self)``, so the class must carry that name.
    """

    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        # Sequence lengths the common tests probe via getattr().
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        """Return a small AutoformerConfig driven by this tester's dimensions."""
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )

    def prepare_autoformer_inputs_dict(self, config):
        """Random encoder/decoder inputs matching ``config``'s expected shapes."""
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Check that a saved/reloaded encoder and decoder reproduce the full
        model's hidden states when fed the hand-assembled Autoformer inputs."""
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3)

        # Decoder seeds: mean of the context (trend init) and zeros (seasonal init).
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_a = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for Autoformer. Methods must carry distinct unittest-style
    names (the obfuscated file reused one name, so only one test survived)."""

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_a, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )

            out_len = len(outputs)
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a pickled test batch from the hub and load it onto the test device.

    The integration tests below call this as ``prepare_batch(...)``; the original
    also passed the downloaded *filename* as ``map_location``, which is not a device.
    """
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    """Slow tests that run the pretrained tourism-monthly checkpoint end to end
    and compare against recorded reference values."""

    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        # Generation is sampled, so compare the per-series mean with a loose rtol.
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1E-1))
| 656 | """simple docstring"""
import os
def lowercase(triangle_path=None):
    """Return the maximum top-to-bottom path sum of a number triangle.

    Fixes: the original body referenced `UpperCamelCase`, `triangle`,
    `numbers_from_line` and `a`, none of which were ever bound (each
    assignment went to the throwaway name `A__`).

    :param triangle_path: optional path to the triangle file; defaults to
        ``triangle.txt`` next to this script (backward compatible — existing
        callers pass no argument).
    :return: the maximum path sum from the apex to the base row.
    """
    if triangle_path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        lines = f.readlines()
    # Parse each line into a row of ints.
    a = []
    for line in lines:
        a.append([int(token) for token in line.strip().split(" ")])
    # Dynamic programming: each cell accumulates the best of its two parents.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            above_right = a[i - 1][j] if j != len(a[i - 1]) else 0
            above_left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(above_left, above_right)
    return max(a[-1])
if __name__ == "__main__":
    # Fixed: the guard called `solution()`, a name that does not exist in this
    # file — the solver above is defined as `lowercase`.
    print(lowercase())
| 656 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowercase(UpperCamelCase: str = "isbn/0140328726") -> dict:
    """Fetch a record from the Open Library JSON API.

    Fixes: the body referenced `olid`/`new_olid`, which were never bound (the
    assignment went to the throwaway name `A__`).

    :param UpperCamelCase: an Open Library olid such as ``isbn/0140328726``.
    :return: the decoded JSON payload for that olid.
    :raises ValueError: if the olid does not contain exactly one slash.
    """
    new_olid = UpperCamelCase.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        raise ValueError(f"{UpperCamelCase} is not a valid Open Library olid")
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


# Restore the conventional name used by summarize_book and the CLI below.
get_openlibrary_data = lowercase
def lowercase(UpperCamelCase: dict) -> dict:
    """Condense a raw Open Library book record into a human-friendly dict.

    Fixes: the body referenced `desired_keys`, `ol_book_data` and `data`,
    none of which were ever bound; `isinstance` was called with the same
    name twice instead of ``(value, list)``.

    :param UpperCamelCase: raw book data as returned by the Open Library API.
    :return: dict keyed by display labels; list values are joined with ", ".
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: UpperCamelCase[key] for key, better_key in desired_keys.items()}
    # Resolve each author reference to a display name via a follow-up lookup.
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


# Restore the conventional name used by the CLI block below.
summarize_book = lowercase
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive loop: look up ISBNs until the user quits.
    # Fixed: the input was bound to `__A` while every later statement read the
    # undefined names `isbn` and `book_summary`.
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Fixed: the conversion helpers below call `logger.info(...)`, but the handle
# was bound to the throwaway name `__A` and `logger` was never defined.
logger = logging.get_logger(__name__)
def lowercase(UpperCamelCase):
    """Translate original GLPN checkpoint keys into HF ``GLPNForDepthEstimation`` naming.

    Fixes: every ``key.replace(...)`` result was discarded into ``A__`` and
    the loop read the undefined names ``state_dict``/``new_state_dict``.

    :param UpperCamelCase: the original checkpoint state dict.
    :return: an ``OrderedDict`` with renamed keys and unchanged values.
    """
    state_dict = UpperCamelCase
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict


# Restore the conventional name the conversion entry point below calls.
rename_keys = lowercase
def lowercase(state_dict, config):
    """Split each fused key/value ("kv") projection into separate key and value entries.

    Fixes: the original signature declared two parameters with the same name
    (a SyntaxError) and the split slices were discarded into ``A__``. The
    parameters are renamed to ``state_dict`` and ``config`` (call sites pass
    them positionally).

    :param state_dict: renamed GLPN state dict, mutated in place.
    :param config: model config providing ``num_encoder_blocks``, ``depths``
        and ``hidden_sizes``.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single fused matrix
            # in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


# Restore the conventional name the conversion entry point below calls.
read_in_k_v = lowercase
def lowercase():
    """Download the standard COCO cats image used for conversion sanity checks.

    Fixes: the URL was bound to ``A__`` while ``requests.get`` received the
    undefined name ``UpperCamelCase`` for both the URL and ``stream``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


# Restore the conventional name the conversion entry point below calls.
prepare_img = lowercase
@torch.no_grad()
def lowercase(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint into a HF ``GLPNForDepthEstimation`` model.

    Fixes: the original signature declared four parameters that all shared one
    name (a SyntaxError) and every local was discarded into ``A__``; parameter
    names are restored (call sites pass them positionally).

    :param checkpoint_path: path to the original ``.pth`` checkpoint.
    :param pytorch_dump_folder_path: output folder for the converted model.
    :param push_to_hub: whether to upload model + processor to the Hub.
    :param model_name: checkpoint name ("nyu"/"kitti" variants are verified
        against reference output slices).
    """
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output against reference slices for the known checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


# Restore the conventional entry-point name the CLI block below calls.
convert_glpn_checkpoint = lowercase
if __name__ == "__main__":
    # Fixed: the parser and parsed args were bound to `__A` while later lines
    # read the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 656 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
# Sentencepiece-gated import: provides VOCAB_FILES_NAMES and save_json used by
# the tokenizer test class below.
if is_sentencepiece_available():
    from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    # NOTE(review): this fixture path was presumably bound to a named constant
    # (e.g. SAMPLE_VOCAB) originally; the `__A` binding here is never read again
    # in this file, while setUp() below references an undefined name instead —
    # TODO confirm and restore the intended constant name.
    __A : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
# Language-code token ids used by the test classes below.
# Fixed: both constants were bound to the throwaway name `__A`, while the test
# classes reference EN_CODE / FR_CODE, which were never defined.
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    """M2M100 (MaMaaa) tokenizer unit-test suite.

    NOTE(review): every method in this class is named `_UpperCAmelCase`, so
    each definition shadows the previous one and the unittest runner can
    discover none of them as tests.  Several bodies also read names that are
    never bound (`save_dir`, `tokenizer`, `vocab_keys`, `UpperCamelCase__`, ...)
    because local assignments were collapsed into `A__`.  The original method
    names (setUp, get_tokenizer, test_full_tokenizer, ...) and locals need to
    be restored before this suite can run; the code is left untouched here and
    only annotated.
    """

    __magic_name__ : str = MaMaaaTokenizer
    __magic_name__ : str = False
    __magic_name__ : Union[str, Any] = False
    __magic_name__ : Any = True

    # Presumably setUp: writes a toy vocab + sentencepiece model into
    # tmpdirname. NOTE(review): `UpperCamelCase__`, `save_dir` and `tokenizer`
    # are undefined as written.
    def _UpperCAmelCase ( self : List[str] ):
        super().setUp()
        A__ : Union[str, Any] =["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        A__ : Optional[int] =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        A__ : Optional[Any] =Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        A__ : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    # Presumably get_tokenizer: loads the tokenizer saved by setUp.
    def _UpperCAmelCase ( self : List[str] , **UpperCamelCase__ : Optional[Any] ):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    # Presumably get_input_output_texts for the common tokenizer tests.
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
        return (
            "This is a test",
            "This is a test",
        )

    # Presumably test_convert_token_and_id. NOTE(review): `UpperCamelCase__`
    # is not a parameter of this zero-argument method.
    def _UpperCAmelCase ( self : Dict ):
        A__ : List[Any] ="</s>"
        A__ : Optional[int] =0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )

    # Presumably test_get_vocab. NOTE(review): `tokenizer` and `vocab_keys`
    # are undefined as written.
    def _UpperCAmelCase ( self : str ):
        A__ : int =self.get_tokenizer()
        A__ : str =list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<s>" )
        self.assertEqual(len(UpperCamelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )

    @unittest.skip("Skip this test while all models are still to be uploaded." )
    def _UpperCAmelCase ( self : List[Any] ):
        pass

    # Presumably test_full_tokenizer: round-trips tokens <-> ids <-> string.
    def _UpperCAmelCase ( self : Tuple ):
        A__ : List[str] =self.get_tokenizer()
        A__ : str =tokenizer.tokenize("This is a test" )
        self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2, 3, 4, 5, 6] , )
        A__ : Any =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        A__ : Optional[Any] =tokenizer.convert_tokens_to_string(UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , "This is a test" )

    # Presumably test_tokenizer_integration: pinned encoding for a fixed
    # model revision. NOTE(review): the expected dict is bound to `A__` but
    # passed below as the undefined `UpperCamelCase__`.
    @slow
    def _UpperCAmelCase ( self : int ):
        # fmt: off
        A__ : List[str] ={"input_ids": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase):
    """M2M100 (MaMaaa) tokenizer integration tests against facebook/m2m100_418M.

    NOTE(review): every method here is named `_UpperCAmelCase`, so each
    definition shadows the previous one and the runner can discover none of
    them as tests; many bodies also read names that were never bound
    (`vocab`, `input_ids`, `generated_ids`, `result`, `expected_french`,
    `tmpdirname`, `original_special_tokens`, `batch`, ...) because local
    assignments were collapsed into `A__`, and the constants EN_CODE/FR_CODE
    must be defined at module level for the class body and asserts to work.
    The code is left untouched and only annotated.
    """

    __magic_name__ : Union[str, Any] = """facebook/m2m100_418M"""
    __magic_name__ : List[Any] = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    __magic_name__ : Optional[int] = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    __magic_name__ : List[str] = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]

    # Presumably setUpClass: caches one tokenizer for all tests.
    # NOTE(review): the tokenizer and the pad id (1) are bound to `A__`
    # instead of being stored on `cls`.
    @classmethod
    def _UpperCAmelCase ( cls : List[Any] ):
        A__ : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        A__ : List[Any] =1
        return cls

    # Pinned language-code ids for a few languages.
    def _UpperCAmelCase ( self : Optional[Any] ):
        self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128006 )
        self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128022 )
        self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128076 )
        self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128063 )

    # Vocab sanity checks. NOTE(review): `vocab` is undefined as written.
    def _UpperCAmelCase ( self : Optional[Any] ):
        A__ : str =self.tokenizer.get_vocab()
        self.assertEqual(len(UpperCamelCase__ ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["<unk>"] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("en" ) , UpperCamelCase__ )

    # Batch-encoding of the English source text matches the pinned ids.
    def _UpperCAmelCase ( self : Union[str, Any] ):
        A__ : List[Any] ="en"
        A__ : List[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )

    # Decoding drops special tokens. NOTE(review): the first assertIn was
    # presumably `FR_CODE`; `UpperCamelCase__` is not defined here.
    def _UpperCAmelCase ( self : List[Any] ):
        self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids )
        # fmt: off
        A__ : List[Any] =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        A__ : int =self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        A__ : List[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
        self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ )

    # Save/reload round-trip preserves the language-token mapping.
    def _UpperCAmelCase ( self : Optional[int] ):
        A__ : Optional[int] =tempfile.mkdtemp()
        A__ : str =self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(UpperCamelCase__ )
        A__ : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCamelCase__ )
        self.assertDictEqual(new_tok.lang_token_to_id , UpperCamelCase__ )

    # Seq2seq batch preparation; compares against fairseq reference shapes.
    @require_torch
    def _UpperCAmelCase ( self : Optional[Any] ):
        A__ : Any ="en"
        A__ : List[str] ="fr"
        A__ : Any =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors="pt" )
        A__ : List[str] =shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            A__ : List[Any] =batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    # Changing src_lang updates the prefix token; suffix is always EOS.
    @require_torch
    def _UpperCAmelCase ( self : Optional[Any] ):
        A__ : Tuple ="mr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        A__ : str ="zh"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    # Target/input mode switches toggle between tgt_lang and src_lang prefixes.
    @require_torch
    def _UpperCAmelCase ( self : int ):
        A__ : Tuple ="mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        A__ : Tuple ="zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )

    # Translation-input builder pins ids and the forced BOS for the target lang.
    @require_torch
    def _UpperCAmelCase ( self : Dict ):
        A__ : Union[str, Any] =self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            } , )
| 656 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
# NOTE(review): this handle is presumably the module logger, but binding it to
# `__A` (which is immediately rebound below) discards it — TODO confirm the
# intended name (`logger`) and restore it.
__A : Any = logging.get_logger(__name__)
# Map of released GPT-Neo checkpoints to their hosted config files.
__A : Optional[Any] = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration class for GPT-Neo models.

    Fixes: the `__init__` signature declared every parameter with the same
    name (a SyntaxError); every attribute was assigned to the throwaway
    local `A__` instead of `self`; the three class attributes all shared the
    name `__magic_name__` (so only the last survived) and are restored to the
    names the `PretrainedConfig` machinery reads; the base class reference
    (`_UpperCamelCase`, undefined in this file) is restored to the imported
    `PretrainedConfig`.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        # One attention kind ("global"/"local") is required per layer.
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Flatten ``[[[kinds], repeat], ...]`` into a per-layer list of attention kinds."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def lowercase(tensor, dimension, size, step):
    """ONNX-exportable re-implementation of ``torch.Tensor.unfold``.

    Fixes: the original signature declared four parameters with one shared
    name (a SyntaxError), and the body read undefined locals (`input`,
    `shape`, `rank`, `sizedim`, `indices`, `s`, `sliced`).

    :param tensor: input tensor.
    :param dimension: dimension along which to extract sliding windows.
    :param size: window length.
    :param step: stride between consecutive windows.
    :return: tensor of windows, matching ``tensor.unfold(dimension, size, step)``.
    """
    import torch

    shape = tensor.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    # Index with a tuple: list-based multidimensional indexing is deprecated.
    sliced = tensor[tuple(s)]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def lowercase(seq_length, window_size):
    """Return the largest divisor of ``seq_length`` below ``window_size`` and the block count.

    Fixes: the original signature declared two parameters with the same name
    (a SyntaxError) and the body read undefined locals (`candidates`,
    `remainders`, `divisor_indices`, `largest_divisor`).

    :param seq_length: sequence length (int or 0-dim tensor).
    :param window_size: exclusive upper bound on candidate divisors.
    :return: (largest_divisor, seq_length // largest_divisor) as tensors.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class __lowerCAmelCase(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo.

    Fixes: the `generate_dummy_inputs` signature declared several parameters
    with one shared name (a SyntaxError); locals were assigned to `A__` while
    later statements read the original names; all four overrides were mangled
    to `_UpperCAmelCase` (shadowing each other) and are restored to the names
    the ONNX export machinery expects; the undefined base class reference is
    restored to the imported `OnnxConfigWithPast`.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (optionally with past key/values) for ONNX tracing."""
        # Deliberately skip OnnxConfigWithPast's own implementation and use the
        # plain OnnxConfig dummy inputs as the starting point.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the synthetic past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 656 | 1 |
"""simple docstring"""
import socket
def lowercase ( ):
"""simple docstring"""
A__ : Dict =socket.socket(socket.AF_INET , socket.SOCK_STREAM )
A__ : int =socket.gethostname()
A__ : Union[str, Any] =12312
sock.connect((host, port) )
sock.send(B"Hello server!" )
with open("Received_file" , "wb" ) as out_file:
print("File opened" )
print("Receiving data..." )
while True:
A__ : Union[str, Any] =sock.recv(1024 )
if not data:
break
out_file.write(UpperCamelCase )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
main()
| 656 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): presumably the module logger, but the `__A` binding is
# immediately shadowed by the (empty) archive map below, so the handle is
# unused — TODO confirm the intended name (`logger`) and restore it.
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration class for MEGATRON-BERT models.

    Fixes: the `__init__` signature declared every parameter with the same
    name (a SyntaxError) and each attribute was assigned to the throwaway
    local `A__` instead of `self`; the `model_type` class attribute and the
    base class reference (`_UpperCamelCase`, undefined in this file) are
    restored to the names the `PretrainedConfig` machinery reads.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 656 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase(UpperCamelCase):
    """Build a ``FocalNetConfig`` matching the named original checkpoint.

    Fixes: the body read `model_name` and a dozen locals (`depths`,
    `focal_levels`, `embed_dim`, `id2label`, ...) that were never bound, and
    the hub-download call received the parameter twice instead of
    (repo_id, filename); the config kwargs `idalabel`/`labelaid` are restored
    to `id2label`/`label2id`.

    :param UpperCamelCase: checkpoint name, e.g. "focalnet-tiny-lrf".
    :return: configured ``FocalNetConfig``.
    """
    model_name = UpperCamelCase
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        # NOTE(review): a large/xlarge/huge name without "fl3"/"fl4" leaves
        # focal_levels/focal_windows unbound — mirrors the visible logic;
        # confirm whether a default branch is needed.
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def lowercase ( UpperCamelCase : Any ):
"""simple docstring"""
if "patch_embed.proj" in name:
A__ : int =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ : List[str] =name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
A__ : str ="encoder." + name
if "encoder.layers" in name:
A__ : Union[str, Any] =name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
A__ : Tuple =name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
A__ : int =name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
A__ : List[Any] =name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
A__ : Union[str, Any] =name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
A__ : int =name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
A__ : List[Any] ="layernorm.weight"
if name == "norm.bias":
A__ : List[Any] ="layernorm.bias"
if "head" in name:
A__ : Union[str, Any] =name.replace("head" , "classifier" )
else:
A__ : Dict ="focalnet." + name
return name
def lowercase ( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert an original FocalNet checkpoint to the HF format and verify it.

    Args:
        model_name: key into the checkpoint URL table below.
        pytorch_dump_folder_path: where to save the converted model/processor
            (skipped when ``None``).
        push_to_hub: also push model and processor to the Hub when True.

    Fix note: the previous signature declared all three parameters under the
    same name (a SyntaxError), and the key-renaming loop discarded its results.
    """
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys in place to the HF naming scheme
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion on a reference COCO image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    # torchvision reference pipeline; must match the processor's output
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    # reference logits from the original implementation (small checkpoints only)
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''')
        model.push_to_hub(F'''{model_name}''')
        processor.push_to_hub(F'''{model_name}''')


# Backward-compatible alias: the __main__ block calls the function by its
# original, descriptive name.
convert_focalnet_checkpoint = lowercase
if __name__ == "__main__":
    # Fix note: the parser and parsed args were previously bound to `__A`
    # while the code below read the undefined names `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
def lowercase ( nums : list[float] ) -> bool:
    """Return True iff the side lengths in ``nums`` can close into a polygon.

    The longest side must be strictly shorter than the sum of all the other
    sides (polygon inequality).

    Args:
        nums: the candidate side lengths.

    Raises:
        ValueError: if fewer than two sides are given, or any side is <= 0.

    Fix note: the previous signature named the parameter ``UpperCamelCase``
    while the body read ``nums`` and ``copy_nums``, so every call raised
    ``NameError``; the original parameter name is restored.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(side <= 0 for side in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    # longest side vs. sum of all the remaining sides
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
    # Run any doctest examples in this module when executed as a script
    # (no-op when the module is imported).
    import doctest
    doctest.testmod()
# (dataset-concatenation artifact removed)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
# Module-level logger for this tokenizer module.
# NOTE(review): obfuscated name — presumably bound as `logger` originally; TODO confirm.
__A : List[str] = logging.get_logger(__name__)
__A : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : Dict = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
__A : int = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
__A : Optional[int] = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class __lowerCAmelCase ( _UpperCamelCase):
    '''
    A "fast" BERT tokenizer backed by the HuggingFace `tokenizers` library.

    Fix notes on the previous version:
    - `__init__` declared every parameter under the same name (a SyntaxError);
      the upstream parameter names are restored.
    - the rebuilt normalizer was assigned to a throwaway local instead of
      `self.backend_tokenizer.normalizer`, and the updated state entries were
      discarded the same way.
    - all three helper methods were defined under the single name
      `_UpperCAmelCase` (each overwriting the previous); the base-class
      contract names are restored.
    '''

    # class-level configuration consumed by the fast-tokenizer base class
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the caller's settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` (single sequence) or `[CLS] A [SEP] B [SEP]` (pair)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ):
        """Return segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the backend tokenizer's model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for the Mega model package.
# Fix note: the import structure and the optional modeling list were previously
# bound to the throwaway name `__A`, while the `_LazyModule(...)` call below
# read the undefined name `_import_structure`, and the lazy module itself was
# never installed into `sys.modules`.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# (dataset-concatenation artifact removed)
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( _UpperCamelCase):
    '''
    Unit tests for `DDIMParallelScheduler`.

    Fix notes on the previous version: all nineteen methods were defined under
    the single name `_UpperCAmelCase`, so each definition silently replaced the
    previous one and `unittest` could not discover any test; several locals were
    collapsed into one name. The upstream helper/test names (which the class's
    own `self.full_loop` / `self.get_scheduler_config` calls expect) are
    restored.
    '''

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        """Base scheduler kwargs; individual tests override via **kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """Run a complete 10-step denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # reference variances from the original DDIM implementation
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1E-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1E-2
        assert abs(result_mean.item() - 0.4982) < 1E-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1E-2
        assert abs(result_mean.item() - 0.223967) < 1E-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1E-2
        assert abs(result_mean.item() - 0.0684) < 1E-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1E-2
        assert abs(result_mean.item() - 0.1951) < 1E-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1E-2
        assert abs(result_mean.item() - 0.1941) < 1E-3
"""simple docstring"""
def lowercase ( UpperCamelCase : int ):
    """Return all primes <= the given number using the sieve of Eratosthenes.

    Args:
        UpperCamelCase: upper bound (inclusive); must be a positive integer.

    Raises:
        ValueError: if the input is not positive.

    Fix notes: the sieve list was previously bound to a throwaway local while
    the loop read the undefined name ``primes``, the strike-out assignment was
    discarded, and the inner range stepped by the input instead of by ``p``.
    """
    num = UpperCamelCase
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # strike out every multiple of p, starting at p*p
            for multiple in range(p * p, num + 1, p):
                primes[multiple] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


# Backward-compatible alias: the __main__ block calls the sieve by its
# descriptive name.
prime_sieve_eratosthenes = lowercase


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Fix note: the parsed number was previously bound to `__A` while the call
    # below read the undefined name `user_num`.
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
# (dataset-concatenation artifact removed)
"""simple docstring"""
import os
def lowercase ( ):
    """Return the maximum top-to-bottom path sum through ``triangle.txt``.

    Reads the whitespace-separated triangle of numbers stored next to this
    script, then folds it row by row: each cell accumulates the larger of its
    two parents, so the last row ends up holding all complete path sums.

    Fix note: this zero-argument function previously read the undefined name
    ``UpperCamelCase`` in place of ``__file__``, and all locals were collapsed
    into one throwaway name.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # parent directly above (absent for the last cell of the row)
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            # parent above-left (absent for the first cell of the row)
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


# Backward-compatible alias: the __main__ block calls the routine by its
# original, descriptive name.
solution = lowercase


if __name__ == "__main__":
    print(solution())
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase):
    '''
    CPU-only regression test: an optimizer wrapped by `Accelerator.prepare`
    must survive a pickle round-trip.

    Fix notes: the test method was previously named `_UpperCAmelCase` (not
    discoverable by unittest), the prepared optimizer was discarded into a
    throwaway local, and `prepare`/`pickle` operated on undefined names.
    '''

    def test_accelerated_optimizer_pickling(self):
        # Minimal model/optimizer pair; the values are irrelevant, only
        # picklability of the accelerated wrapper is under test.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''')
        # Reset accelerate's global singleton state so later tests start clean.
        AcceleratorState._reset_state()
# (dataset-concatenation artifact removed)
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCAmelCase :
    '''
    Directed, weighted graph stored as an adjacency dict:
    ``self.graph[u]`` is a list of ``[weight, v]`` edges leaving ``u``.

    Fix notes: every method of the previous version was defined under the
    single name ``_UpperCAmelCase`` — each definition silently replaced the
    previous one — while the class's own code called ``self.add_pair``,
    ``self.dfs`` and ``self.bfs``; the descriptive method names are restored,
    together with the distinct locals that had been collapsed into ``A__``.
    '''

    def __init__(self):
        # adjacency map: node -> list of [weight, neighbour]
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the directed edge u -> v with weight w (deduplicated) and make
        sure v exists as a node."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every node currently in the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge u -> v, if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search starting at ``s`` (default: first node),
        stopping early if ``d`` is reached; returns the visit order."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with random edges; ``c`` caps the node count."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s`` (default: first node); returns the
        visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing into ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological ordering starting at ``s``."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the nodes that participate in some cycle (DFS with
        back-edge detection)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge found: everything on the stack down to the
                        # target belongs to a cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a back edge is found, False otherwise."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock duration of ``dfs(s, e)`` in seconds."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock duration of ``bfs(s)`` in seconds."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class __lowerCAmelCase :
    '''
    Undirected, weighted graph: adjacency dict mapping node -> list of
    ``[weight, neighbour]`` pairs (each edge is stored in both directions).
    '''
    def __init__( self : Union[str, Any] ):
        # NOTE(review): binds an empty dict to a throwaway local instead of an
        # attribute; the methods below read `self.graph`, so this was presumably
        # `self.graph = {}` before obfuscation — TODO confirm.
        A__ : int ={}
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=1 ):
        """Add an undirected weighted edge (stored once per direction, deduplicated).

        NOTE(review): all three parameters are declared under the same name
        (a SyntaxError in Python), and the bodies read `u`, `v`, `w` which are
        never bound — presumably the original signature was (u, v, w=1) and the
        `else` branches assigned `self.graph[u]` / `self.graph[v]`. TODO confirm.
        """
        # check if the u exists
        if self.graph.get(UpperCamelCase__ ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            A__ : Optional[Any] =[[w, v]]
        # add the other way
        if self.graph.get(UpperCamelCase__ ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if u does not exist
            A__ : Optional[Any] =[[w, u]]
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
# the other way round
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : int=-2 , UpperCamelCase__ : Union[str, Any]=-1 ):
if s == d:
return []
A__ : str =[]
A__ : List[str] =[]
if s == -2:
A__ : str =list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : str =s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ : Dict =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ : Optional[int] =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
A__ : Tuple =stack[len(UpperCamelCase__ ) - 1]
else:
A__ : Optional[Any] =ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : int=-1 ):
if c == -1:
A__ : int =floor(random() * 10000 ) + 10
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ : Tuple =floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str=-2 ):
A__ : List[str] =deque()
A__ : Optional[Any] =[]
if s == -2:
A__ : Union[str, Any] =list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
A__ : List[Any] =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[Any] ):
return len(self.graph[u] )
def _UpperCAmelCase ( self : Dict ):
A__ : Any =[]
A__ : str =[]
A__ : List[Any] =list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : List[str] =-2
A__ : List[str] =[]
A__ : Union[str, Any] =s
A__ : int =False
A__ : str =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ : str =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ : Tuple =len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ : str =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ : Union[str, Any] =True
if len(UpperCamelCase__ ) != 0:
A__ : Optional[Any] =stack[len(UpperCamelCase__ ) - 1]
else:
A__ : int =False
indirect_parents.append(UpperCamelCase__ )
A__ : int =s
A__ : Optional[int] =ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] ):
A__ : List[Any] =[]
A__ : List[Any] =[]
A__ : List[str] =list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : Optional[Any] =-2
A__ : Optional[int] =[]
A__ : int =s
A__ : Optional[Any] =False
A__ : str =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ : Any =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ : str =len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ : List[Any] =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ : Optional[Any] =True
if len(UpperCamelCase__ ) != 0:
A__ : Union[str, Any] =stack[len(UpperCamelCase__ ) - 1]
else:
A__ : str =False
indirect_parents.append(UpperCamelCase__ )
A__ : Union[str, Any] =s
A__ : Optional[int] =ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def _UpperCAmelCase ( self : List[Any] ):
return list(self.graph )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[Any]=-2 , UpperCamelCase__ : Optional[int]=-1 ):
A__ : List[Any] =time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
A__ : Any =time()
return end - begin
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Dict=-2 ):
A__ : str =time()
self.bfs(UpperCamelCase__ )
A__ : int =time()
return end - begin
| 656 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Optional slow-tokenizer import: BigBirdTokenizer requires sentencepiece.
# NOTE(review): the fallback assigns to ``__A`` rather than the imported
# name, so the usual ``BigBirdTokenizer = None`` fallback is not in effect.
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    __A : Optional[int] = None
__A : Union[str, Any] = logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory.
__A : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# Hub locations of the pretrained BigBird vocab / tokenizer files.
# NOTE(review): successive rebinding of ``__A`` clobbers each constant; the
# class below references them by their intended names (VOCAB_FILES_NAMES, …).
__A : str = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum positional-embedding sizes per pretrained checkpoint.
__A : List[str] = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}
# SentencePiece word-start marker.
__A : Tuple = "▁"
class __lowerCAmelCase ( _UpperCamelCase):
    """Fast (tokenizers-backed) BigBird tokenizer wrapper.

    NOTE(review): machine-transformed code — every class attribute is named
    ``__magic_name__`` (each shadows the previous), several signatures declare
    duplicate ``UpperCamelCase__`` parameters (a SyntaxError), and locals are
    all bound to ``A__``.  Comments describe the apparent intent; the code is
    left byte-identical.
    """
    __magic_name__ : Dict = VOCAB_FILES_NAMES
    __magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : List[Any] = BigBirdTokenizer
    __magic_name__ : Any = ["""input_ids""", """attention_mask"""]
    __magic_name__ : List[int] = []
    def __init__( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : str="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : str="[CLS]" , **UpperCamelCase__ : List[Any] , ):
        # Intended: normalise each plain-string special token to an AddedToken
        # (lstrip/rstrip=False) before handing everything to the base class.
        A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
        A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
        A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
        A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
        A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
        A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        super().__init__(
            UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
        A__ : List[Any] =vocab_file
        # Intended: can_save_slow_tokenizer — True only when a sentencepiece
        # vocab file is available.
        A__ : Optional[int] =False if not self.vocab_file else True
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # Intended: build_inputs_with_special_tokens —
        # single sequence:  [CLS] X [SEP]; pair: [CLS] A [SEP] B [SEP].
        A__ : Tuple =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        # Intended: get_special_tokens_mask — 1 for special tokens, 0 for
        # sequence tokens, in the same layout as the method above.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCamelCase__ )) + [1]
        return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # Intended: create_token_type_ids_from_sequences — 0s for the first
        # segment (incl. CLS/SEP), 1s for the second.
        A__ : Tuple =[self.sep_token_id]
        A__ : Dict =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        # Intended: save_vocabulary — copy the sentencepiece model into the
        # target directory and return the written path.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : List[str] =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        return (out_vocab_file,)
| 656 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# UNet config for small/test (32px) consistency-model checkpoints.
# NOTE(review): all five config dicts below are rebound to ``__A`` — the
# ``__main__`` block refers to them by distinct intended names
# (TEST_UNET_CONFIG, IMAGENET_64_UNET_CONFIG, LSUN_256_UNET_CONFIG, …).
__A : Any = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1_000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# UNet config for the class-conditional ImageNet-64 checkpoints.
__A : Any = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1_000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# UNet config for the unconditional LSUN (256px bedroom/cat) checkpoints.
__A : Optional[Any] = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# Scheduler configs: consistency-distillation and the two
# consistency-training variants differ only in num_train_timesteps.
__A : List[str] = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
__A : Optional[Any] = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
__A : str = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def lowercase ( UpperCamelCase : List[str] ):
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowercase ( checkpoint : dict , new_checkpoint : dict , old_prefix : str , new_prefix : str , has_skip : bool = False ):
    """Remap one consistency-model ResNet block into diffusers naming.

    Copies the ``in_layers``/``emb_layers``/``out_layers`` (and optional
    ``skip_connection``) tensors from ``checkpoint`` under ``old_prefix``
    into ``new_checkpoint`` under diffusers' ``norm1``/``conv1``/
    ``time_emb_proj``/``norm2``/``conv2``/``conv_shortcut`` names.

    Returns ``new_checkpoint`` (mutated in place).

    NOTE(review): the original declared five identically-named parameters
    (a SyntaxError) and wrote every output to the throwaway local ``A__``,
    returning ``new_checkpoint`` unmodified; reconstructed here.
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    # Only residual blocks that change channel count carry a skip projection.
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def lowercase ( checkpoint : dict , new_checkpoint : dict , old_prefix : str , new_prefix : str , attention_head_dim : int = None ):
    """Remap one consistency-model attention block into diffusers naming.

    The fused ``qkv`` conv weight/bias are split into thirds along dim 0 and
    the trailing 1x1 conv dims are squeezed away so they become linear
    ``to_q``/``to_k``/``to_v`` weights; ``norm`` becomes ``group_norm`` and
    ``proj_out`` becomes ``to_out.0``.

    Returns ``new_checkpoint`` (mutated in place).

    NOTE(review): the original bound every result to the same local ``A__``
    (even the three chunk() outputs) and returned ``new_checkpoint``
    unmodified; reconstructed here. ``attention_head_dim`` is accepted but
    unused, matching the original call sites.
    """
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    # Drop the two trailing singleton conv dims: (C, C, 1, 1) -> (C, C).
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def lowercase ( UpperCamelCase : str , UpperCamelCase : Any ):
    """Convert a consistency-model ``unet.pt`` state dict to diffusers layout.

    Intended signature: ``con_pt_to_diffuser(unet_path, unet_config)`` —
    loads the checkpoint with ``torch.load`` and walks the configured down
    blocks, mid block and up blocks, delegating per-layer remapping to
    ``convert_resnet`` / ``convert_attention``.

    NOTE(review): machine-transformed — the two parameters share one name
    (a SyntaxError) and every local (including ``checkpoint``,
    ``new_checkpoint``, ``current_layer`` …) is bound to ``A__`` while the
    body reads the intended names; cannot run as written.  Code left
    byte-identical, comments describe the intent.
    """
    A__ : str =torch.load(UpperCamelCase , map_location="cpu" )
    A__ : Optional[int] ={}
    # Time-embedding MLP.
    A__ : Any =checkpoint["time_embed.0.weight"]
    A__ : Tuple =checkpoint["time_embed.0.bias"]
    A__ : Tuple =checkpoint["time_embed.2.weight"]
    A__ : List[str] =checkpoint["time_embed.2.bias"]
    # Class-conditional models additionally carry a label embedding.
    if unet_config["num_class_embeds"] is not None:
        A__ : Union[str, Any] =checkpoint["label_emb.weight"]
    # Input stem convolution.
    A__ : List[Any] =checkpoint["input_blocks.0.0.weight"]
    A__ : List[str] =checkpoint["input_blocks.0.0.bias"]
    A__ : Tuple =unet_config["down_block_types"]
    A__ : Union[str, Any] =unet_config["layers_per_block"]
    A__ : Dict =unet_config["attention_head_dim"]
    A__ : Union[str, Any] =unet_config["block_out_channels"]
    A__ : Any =1
    A__ : Optional[int] =channels_list[0]
    # ---- Down blocks ----
    for i, layer_type in enumerate(UpperCamelCase ):
        A__ : int =channels_list[i]
        # A channel change means the first resnet needs a skip projection.
        A__ : List[str] =current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(UpperCamelCase ):
                A__ : Union[str, Any] =F'''down_blocks.{i}.resnets.{j}'''
                A__ : Union[str, Any] =F'''input_blocks.{current_layer}.0'''
                A__ : Optional[int] =True if j == 0 and downsample_block_has_skip else False
                A__ : str =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(UpperCamelCase ):
                A__ : Dict =F'''down_blocks.{i}.resnets.{j}'''
                A__ : int =F'''input_blocks.{current_layer}.0'''
                A__ : List[str] =True if j == 0 and downsample_block_has_skip else False
                A__ : Optional[int] =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase )
                A__ : Tuple =F'''down_blocks.{i}.attentions.{j}'''
                A__ : Dict =F'''input_blocks.{current_layer}.1'''
                A__ : Dict =convert_attention(
                    UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
                current_layer += 1
        # Every down block but the last ends with a downsampler.
        if i != len(UpperCamelCase ) - 1:
            A__ : List[Any] =F'''down_blocks.{i}.downsamplers.0'''
            A__ : Tuple =F'''input_blocks.{current_layer}.0'''
            A__ : List[str] =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
            current_layer += 1
        A__ : Union[str, Any] =current_channels
    # hardcoded the mid-block for now
    A__ : int ="mid_block.resnets.0"
    A__ : str ="middle_block.0"
    A__ : Dict =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    A__ : Dict ="mid_block.attentions.0"
    A__ : Union[str, Any] ="middle_block.1"
    A__ : List[str] =convert_attention(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    A__ : str ="mid_block.resnets.1"
    A__ : List[Any] ="middle_block.2"
    A__ : Any =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    A__ : Any =0
    A__ : List[Any] =unet_config["up_block_types"]
    # ---- Up blocks (layers_per_block + 1 resnets each, for skip concat) ----
    for i, layer_type in enumerate(UpperCamelCase ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                A__ : Any =F'''up_blocks.{i}.resnets.{j}'''
                A__ : Dict =F'''output_blocks.{current_layer}.0'''
                A__ : List[Any] =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase )
                current_layer += 1
            if i != len(UpperCamelCase ) - 1:
                A__ : List[str] =F'''up_blocks.{i}.upsamplers.0'''
                A__ : Optional[int] =F'''output_blocks.{current_layer-1}.1'''
                A__ : Tuple =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                A__ : Union[str, Any] =F'''up_blocks.{i}.resnets.{j}'''
                A__ : str =F'''output_blocks.{current_layer}.0'''
                A__ : Union[str, Any] =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase )
                A__ : Optional[int] =F'''up_blocks.{i}.attentions.{j}'''
                A__ : int =F'''output_blocks.{current_layer}.1'''
                A__ : int =convert_attention(
                    UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
                current_layer += 1
            if i != len(UpperCamelCase ) - 1:
                A__ : Tuple =F'''up_blocks.{i}.upsamplers.0'''
                A__ : List[Any] =F'''output_blocks.{current_layer-1}.2'''
                A__ : int =convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    # Output norm + conv.
    A__ : List[Any] =checkpoint["out.0.weight"]
    A__ : Any =checkpoint["out.0.bias"]
    A__ : List[Any] =checkpoint["out.2.weight"]
    A__ : Union[str, Any] =checkpoint["out.2.bias"]
    return new_checkpoint
# CLI entry point: convert an OpenAI consistency-model ``unet.pt`` into a
# diffusers ConsistencyModelPipeline and save it to ``--dump_path``.
# NOTE(review): machine-transformed — every result is rebound to ``__A``
# while later lines read the intended names (parser, args, ckpt_name,
# unet_config, …), and helpers like ``strabool``/``con_pt_to_diffuser`` are
# not defined under those names above; cannot run as written.
if __name__ == "__main__":
    __A : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    __A : Optional[int] = parser.parse_args()
    __A : str = strabool(args.class_cond)
    __A : Tuple = os.path.basename(args.unet_path)
    print(f"""Checkpoint: {ckpt_name}""")
    # Get U-Net config
    # The checkpoint filename encodes which architecture was trained.
    if "imagenet64" in ckpt_name:
        __A : Optional[int] = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        __A : List[Any] = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        __A : Any = TEST_UNET_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not args.class_cond:
        __A : Union[str, Any] = None
    __A : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
    __A : Union[str, Any] = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    # Distilled ("cd") and consistency-trained ("ct") checkpoints use
    # different numbers of train timesteps.
    if "cd" in ckpt_name or "test" in ckpt_name:
        __A : str = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        __A : int = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        __A : Dict = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    __A : List[str] = CMStochasticIterativeScheduler(**scheduler_config)
    __A : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 656 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A : Optional[int] = logging.get_logger(__name__)
# File name expected for the sentencepiece model inside a saved tokenizer
# directory, and the hub location of the pretrained CPM vocab file.
# NOTE(review): both constants are rebound to ``__A``; the class below
# references the intended name ``VOCAB_FILES_NAMES``.
__A : Optional[int] = {"vocab_file": "spiece.model"}
__A : List[Any] = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase ( _UpperCamelCase):
    """CPM (Chinese Pre-trained Model) tokenizer: XLNet-style sentencepiece
    tokenization preceded by jieba word segmentation, with spaces/newlines
    round-tripped through the placeholder characters U+2582/U+2583.

    NOTE(review): machine-transformed — most locals and attributes are bound
    to ``A__`` and many signatures declare duplicate ``UpperCamelCase__``
    parameters (a SyntaxError), so the class cannot run as written.  Comments
    describe the apparent intent; code is left byte-identical.
    """
    def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
        # Intended: normalise mask_token to an AddedToken, load the
        # sentencepiece model, and require jieba for pre-tokenization.
        A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        A__ : Dict =3
        A__ : int =do_lower_case
        A__ : str =remove_space
        A__ : Optional[Any] =keep_accents
        A__ : int =vocab_file
        A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCamelCase__ )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        A__ : Union[str, Any] =jieba
        # Translation table mapping " " -> U+2582 and "\n" -> U+2583 so the
        # sentencepiece model never sees raw whitespace.
        A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Intended: vocab_size — size of the sentencepiece model.
        return len(self.sp_model )
    def _UpperCAmelCase ( self : Optional[int] ):
        # Intended: get_vocab() — token -> id mapping incl. added tokens.
        A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : List[str] ):
        # The SentencePieceProcessor is not picklable; drop it and reload
        # from ``vocab_file`` in __setstate__.
        A__ : Union[str, Any] =self.__dict__.copy()
        A__ : Tuple =None
        return state
    def __setstate__( self : Tuple , UpperCamelCase__ : int ):
        A__ : Union[str, Any] =d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            A__ : Optional[int] ={}
        A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ):
        # Intended: preprocess_text — whitespace collapse, quote
        # normalisation, optional accent stripping and lowercasing.
        if self.remove_space:
            A__ : Optional[int] =" ".join(inputs.strip().split() )
        else:
            A__ : Optional[Any] =inputs
        A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ )
            A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
        if self.do_lower_case:
            A__ : str =outputs.lower()
        return outputs
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
        # Intended: _tokenize — sentencepiece encoding with the XLNet-style
        # fix-up for pieces that end in "<digit>," .
        A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ )
        A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
        A__ : List[str] =[]
        for piece in pieces:
            if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                A__ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        A__ : Union[str, Any] =cur_pieces[1:]
                    else:
                        A__ : List[str] =cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(UpperCamelCase__ )
            else:
                new_pieces.append(UpperCamelCase__ )
        return new_pieces
    def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
        # Intended: _convert_token_to_id.
        return self.sp_model.PieceToId(UpperCamelCase__ )
    def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
        # Intended: _convert_id_to_token.
        return self.sp_model.IdToPiece(UpperCamelCase__ )
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
        # Intended: convert_tokens_to_string — join pieces and restore spaces
        # from the SPIECE_UNDERLINE marker.
        A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
        return out_string
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # Intended: build_inputs_with_special_tokens — XLNet layout:
        # single: X <sep> <cls>; pair: A <sep> B <sep> <cls>.
        A__ : List[str] =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        # Intended: get_special_tokens_mask matching the layout above.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
        if token_ids_a is not None:
            return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
        return ([0] * len(UpperCamelCase__ )) + [1, 1]
    def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # Intended: create_token_type_ids_from_sequences — segment ids 0/1,
        # with the trailing <cls> in its own segment (id 2).
        A__ : List[str] =[self.sep_token_id]
        A__ : Optional[Any] =[2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        # Intended: save_vocabulary — copy the spiece model (or serialize it
        # when only an in-memory model exists) into ``save_directory``.
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : Tuple =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase__ , "wb" ) as fi:
                A__ : Optional[Any] =self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__ )
        return (out_vocab_file,)
    def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
        # Intended: _decode — undo the " \n" -> U+2582/U+2583 substitution
        # applied before sentencepiece encoding.
        A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ )
        A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__A : List[Any] = 10
# NOTE(review): the search functions in this module reference ``precision``,
# but the constant above was bound to ``__A``; alias it so they resolve.
precision = __A
def lowercase ( left : int , right : int , array : list[int] , target : int ):
    """Linear scan of ``array[left:right]`` for ``target``.

    Returns the first matching index, or -1 when ``target`` is absent.

    NOTE(review): the original declared all four parameters with one name
    (a SyntaxError); the names are restored from the body's usage.
    """
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def lowercase ( array : list[int] , target : int , precision : int = 10 ):
    """Iterative ternary search over the sorted list ``array``.

    Returns an index of ``target`` or -1.  Once the search window shrinks
    below ``precision`` elements the search finishes with a linear scan.

    ``precision`` is a new keyword parameter (default 10) replacing the
    module-level constant the original referenced but — after the file was
    scrambled — never defined.  The original also declared both positional
    parameters with one name (a SyntaxError) and delegated the fallback to
    a ``lin_search`` name that does not exist in this module; the fallback
    is inlined here with identical semantics.
    """
    left = 0
    right = len(array )
    while left <= right:
        # Small window: plain linear scan over array[left:right].
        if right - left < precision:
            for i in range(left , right ):
                if array[i] == target:
                    return i
            return -1
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    # Window exhausted without a hit.
    return -1
def lowercase ( left : int , right : int , array : list[int] , target : int , precision : int = 10 ):
    """Recursive ternary search of sorted ``array`` within ``[left, right)``.

    Returns an index of ``target`` or -1; windows smaller than ``precision``
    elements are finished with a linear scan.

    ``precision`` is a new keyword parameter (default 10) replacing the
    module-level constant the scrambled original referenced but never
    defined.  The original also declared its four parameters with one name
    (a SyntaxError), recursed via the nonexistent name
    ``rec_ternary_search`` and delegated to a nonexistent ``lin_search``;
    both are resolved here (self-recursion + inlined linear scan).
    """
    if left < right:
        # Small window: plain linear scan over array[left:right].
        if right - left < precision:
            for i in range(left , right ):
                if array[i] == target:
                    return i
            return -1
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return lowercase(left , one_third - 1 , array , target , precision )
        elif array[two_third] < target:
            return lowercase(two_third + 1 , right , array , target , precision )
        else:
            return lowercase(one_third + 1 , two_third - 1 , array , target , precision )
    else:
        return -1
# Interactive driver: read a sorted list and a target, run both search
# variants and report where the target was found.
# NOTE(review): machine-transformed — inputs are rebound to ``__A`` while
# later lines read ``user_input``/``collection``/``target``, and the called
# names ``ite_ternary_search``/``rec_ternary_search`` are not defined in
# this module (the functions above are all named ``lowercase``).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    __A : List[Any] = input("Enter numbers separated by comma:\n").strip()
    __A : str = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    __A : Union[str, Any] = int(input("Enter the number to be found in the list:\n").strip())
    __A : Tuple = ite_ternary_search(collection, target)
    __A : Optional[int] = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"""Iterative search: {target} found at positions: {resulta}""")
        print(f"""Recursive search: {target} found at positions: {resulta}""")
    else:
        print("Not found")
| 656 | """simple docstring"""
def lowercase ( n : int , array : list[int] , target : int ):
    """Combination Sum IV — count ordered sequences of ``array`` items
    (repetition allowed) that sum to ``target``, by naive exponential
    recursion.

    ``n`` is unused here; it is kept so all three implementations in this
    file share one signature (the bottom-up variant uses it as len(array)).

    NOTE(review): the original declared three identically-named parameters
    (a SyntaxError) and its inner helper closed over the undefined names
    ``target``/``array``; parameter names are restored from the body.
    """
    def count_of_possible_combinations(target : int ) -> int:
        # Overshot: this branch contributes nothing.
        if target < 0:
            return 0
        # Exact hit: exactly one valid (empty-suffix) combination.
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def lowercase ( n : int , array : list[int] , target : int ):
    """Combination Sum IV with top-down memoisation.

    Same contract as the naive variant above; ``dp_array[t]`` caches the
    number of ordered combinations summing to ``t`` (-1 = not computed).
    ``n`` is unused and kept only for the shared signature.

    NOTE(review): the original declared three identically-named parameters
    (a SyntaxError) and bound ``answer``/``dp_array`` to the throwaway
    local ``A__``; restored from the body's usage.
    """
    def count_of_possible_combinations_with_dp_array(
        target : int , dp_array : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        # Cache hit.
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def lowercase(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to ``target`` with bottom-up DP.

    Considers the first ``n`` entries of ``array``.  ``dp_array[i]`` holds the
    number of ordered combinations that sum to ``i``; runs in O(target * n).

    Fixes the mangled original, which declared duplicate parameter names (a
    SyntaxError) and lost the inner loop bound.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # exactly one way to reach 0: pick nothing
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): identifiers were machine-mangled — all three constants are
    # bound to `__A`, while the call below reads `n`, `array`, `target`, and
    # `combination_sum_iv` is not defined under that name in this chunk (the
    # variants above were renamed to `lowercase`).  Confirm before running.
    __A : Optional[Any] = 3
    __A : Optional[Any] = 5
    __A : int = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 656 | 1 |
"""Repo-maintenance script: verify config-class docstrings mention a valid checkpoint.

NOTE(review): identifiers in this chunk were machine-mangled — every constant
is bound to `__A`, yet later code reads them as `PATH_TO_TRANSFORMERS`,
`transformers`, `CONFIG_MAPPING`, `_re_checkpoint`, and
`CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK`.  Confirm against
the upstream `utils/check_config_docstrings.py` before relying on behavior.
"""
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A : Optional[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A : Dict = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
# Config classes exempt from the checkpoint-in-docstring requirement.
__A : Dict = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def lowercase(config_class):
    """Return the checkpoint name referenced in ``config_class``'s source, if valid.

    Scans the class source for ``[name](https://huggingface.co/name)`` markdown
    links (via the module-level ``_re_checkpoint`` regex) and returns the first
    name whose link matches ``https://huggingface.co/<name>``; ``None`` if no
    link checks out.

    Fixes the mangled original, which assigned every intermediate to a
    throwaway name and then read undefined ``checkpoints``/``checkpoint``.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each entry is a (checkpoint name, checkpoint link) tuple.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def lowercase():
    """Raise ``ValueError`` if any non-deprecated config class lacks a valid checkpoint.

    Iterates every registered config class, skips deprecated models, and
    collects the names of classes whose docstring has no matching checkpoint
    link (unless explicitly whitelisted).

    Fixes the mangled original, which read ``configs_without_checkpoint``,
    ``checkpoint``, ``name`` and ``message`` without ever binding them.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
if __name__ == "__main__":
    # NOTE(review): `check_config_docstrings_have_checkpoints` is not defined
    # under that name in this chunk (the checker above was renamed to
    # `lowercase` by the mangling) — confirm the intended entry point.
    check_config_docstrings_have_checkpoints()
| 656 | """simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase(UpperCamelCase):
    """Exact Gaussian Error Linear Unit: ``x * Phi(x)`` computed via ``erf``.

    Fixes the mangled original, which assigned the tensor and CDF to throwaway
    names but read back undefined ``x``/``cdf`` (NameError at call time).
    """
    x = tf.convert_to_tensor(UpperCamelCase)
    # Phi(x) for a standard normal distribution, via the error function.
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def lowercase(UpperCamelCase):
    """Tanh-approximated GELU (the "gelu_new" variant used by GPT-2/BERT).

    Fixes the mangled original, which read undefined ``pi``/``coeff``/``x``/``cdf``.
    """
    x = tf.convert_to_tensor(UpperCamelCase)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.04_47_15, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def lowercase(UpperCamelCase):
    """Mish activation: ``x * tanh(softplus(x))``.

    Fixes the mangled original, which read undefined ``x`` in the return.
    """
    x = tf.convert_to_tensor(UpperCamelCase)
    return x * tf.tanh(tf.math.softplus(x))
def lowercase(UpperCamelCase):
    """Fast approximate GELU: ``0.5*x*(1 + tanh(x*c_b*(1 + c_a*x*x)))``.

    Restores the two distinct coefficients (0.044715 and sqrt(2/pi) ≈
    0.7978845608); the mangled original collapsed both into a single name,
    changing the math as well as raising NameError on undefined ``x``.
    """
    x = tf.convert_to_tensor(UpperCamelCase)
    coeff_a = tf.cast(0.04_47_15, x.dtype)
    coeff_b = tf.cast(0.79_78_84_56_08, x.dtype)  # ~= sqrt(2 / pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x)))
def lowercase(UpperCamelCase):
    """Quick GELU: ``x * sigmoid(1.702 * x)``.

    Fixes the mangled original, which read undefined ``x``/``coeff``.
    """
    x = tf.convert_to_tensor(UpperCamelCase)
    coeff = tf.cast(1.7_02, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def lowercase(UpperCamelCase):
    """GELU with outputs clipped to the range [-10, 10].

    Clipping keeps the activation bounded, which is useful for fixed-range
    quantization schemes.
    """
    activated = _gelu(UpperCamelCase)
    return tf.clip_by_value(activated, -10, 10)
def lowercase(x, axis=-1):
    """Gated Linear Unit: split ``x`` in two halves along ``axis``, gate with sigmoid.

    Returns ``a * sigmoid(b)`` where ``a``/``b`` are the two halves.  Fixes the
    mangled original, which declared two parameters with the same name (a
    SyntaxError) and lost the unpacking targets.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(UpperCamelCase):
        """Keras GELU with the tanh approximation enabled."""
        # `approximate` must be the boolean flag; the mangled original passed
        # the input tensor itself as the flag.
        return tf.keras.activations.gelu(UpperCamelCase, approximate=True)

    # TF >= 2.4 ships both GELU variants natively.
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    # Fall back to the hand-written implementations on older TF.
    # NOTE(review): `_gelu`/`_gelu_new` are the exact/approximate GELU helpers
    # defined above in this file (their defs were renamed by the mangling) —
    # confirm the binding targets.
    gelu = _gelu
    gelu_new = _gelu_new
# Registry mapping activation-function names to TF callables (ACT2FN).
# NOTE(review): several referenced names (`gelu`, `gelu_aa`, `gelu_fast`,
# `gelu_new`, `glu`, `mish`, `quick_gelu`) stem from identifier mangling and
# are not bound under these names in the visible chunk — confirm.
__A : List[str] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase(activation_string):
    """Return the TF activation function registered under ``activation_string``.

    Raises ``KeyError`` listing the known names when the lookup fails.  Fixes
    the mangled original, which read ``activation_string`` while declaring a
    different parameter name (NameError on every call).
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''')
| 656 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module-level logger for the CPM tokenizer.
__A : Optional[int] = logging.get_logger(__name__)
# Name of the serialized SentencePiece model inside a checkpoint directory.
# NOTE(review): mangled names — downstream code reads these constants as
# `VOCAB_FILES_NAMES` / the pretrained vocab map; confirm.
__A : Optional[int] = {"vocab_file": "spiece.model"}
__A : List[Any] = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase ( _UpperCamelCase):
    """SentencePiece-based CPM tokenizer (XLNet-style) with jieba pre-segmentation.

    NOTE(review): identifiers in this file were machine-mangled — ``A__``
    assignments are read back elsewhere under their original attribute names,
    and several keyword parameters collide.  Treat local names as approximate
    and confirm against the upstream ``CpmTokenizer``.
    """

    def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
        # Wrap the mask token so left-side whitespace is preserved.
        A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        A__ : Dict =3
        A__ : int =do_lower_case
        A__ : str =remove_space
        A__ : Optional[Any] =keep_accents
        A__ : int =vocab_file
        # Load the SentencePiece model that backs tokenization.
        A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCamelCase__ )
        # jieba is required for Chinese word segmentation before SentencePiece.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        A__ : Union[str, Any] =jieba
        # Map space/newline to private-use glyphs so they survive tokenization.
        A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Size of the underlying SentencePiece vocabulary.
        return len(self.sp_model )

    def _UpperCAmelCase ( self : Optional[int] ):
        # Full token -> id map, including tokens added after training.
        A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : List[str] ):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        A__ : Union[str, Any] =self.__dict__.copy()
        A__ : Tuple =None
        return state

    def __setstate__( self : Tuple , UpperCamelCase__ : int ):
        A__ : Union[str, Any] =d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            A__ : Optional[int] ={}
        # Rebuild the SentencePiece processor dropped by __getstate__.
        A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ):
        # Normalize raw text: whitespace collapse, quote normalization,
        # optional accent stripping and lower-casing.
        if self.remove_space:
            A__ : Optional[int] =" ".join(inputs.strip().split() )
        else:
            A__ : Optional[Any] =inputs
        A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ )
            A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
        if self.do_lower_case:
            A__ : str =outputs.lower()
        return outputs

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
        # Tokenize with SentencePiece, with XLNet's special re-splitting of
        # pieces that end in a digit followed by a comma.
        A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ )
        A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
        A__ : List[str] =[]
        for piece in pieces:
            if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                A__ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        A__ : Union[str, Any] =cur_pieces[1:]
                    else:
                        A__ : List[str] =cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(UpperCamelCase__ )
            else:
                new_pieces.append(UpperCamelCase__ )
        return new_pieces

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
        # token (str) -> id (int) via the SentencePiece model.
        return self.sp_model.PieceToId(UpperCamelCase__ )

    def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
        # id (int) -> token (str) via the SentencePiece model.
        return self.sp_model.IdToPiece(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
        # Join pieces back into a string, replacing the SentencePiece
        # underline marker with spaces.
        A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
        return out_string

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # XLNet-style input building: X <sep> <cls> or A <sep> B <sep> <cls>.
        A__ : List[str] =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        # Mask marking special tokens (1) vs sequence tokens (0); specials sit
        # at the end in the XLNet layout.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
        if token_ids_a is not None:
            return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
        return ([0] * len(UpperCamelCase__ )) + [1, 1]

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # Token-type ids: segment 0 for A, 1 for B, and 2 for the <cls> slot.
        A__ : List[str] =[self.sep_token_id]
        A__ : Optional[Any] =[2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        # Persist the SentencePiece model file into `save_directory`; copies
        # the original file when available, otherwise serializes from memory.
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : Tuple =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase__ , "wb" ) as fi:
                A__ : Optional[Any] =self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__ )
        return (out_vocab_file,)

    def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
        # Undo the CPM whitespace encoding on decode: drop spaces, map the
        # private-use glyphs back to space/newline.
        A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ )
        A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
| 656 | """simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( _UpperCamelCase):
    """Segformer config tester: checks Segformer-specific attributes exist.

    NOTE(review): identifiers were machine-mangled — the constructed config is
    bound to ``A__`` but checked via ``UpperCamelCase__``; confirm against the
    upstream ``SegformerConfigTester``.
    """
    def _UpperCAmelCase ( self : Dict ):
        # Build a config from the tester's inputs and assert it exposes the
        # attributes the model tests below rely on.
        A__ : Optional[Any] =self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_attention_heads" ) )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_encoder_blocks" ) )
class __lowerCAmelCase :
    """Segformer model tester: builds tiny configs/inputs and checks model outputs.

    NOTE(review): identifiers were machine-mangled — ``__init__`` declares
    colliding parameter names and assigns to ``A__`` while reading the
    original names (``parent``, ``batch_size``, ...).  Code is preserved
    byte-for-byte; confirm against the upstream ``SegformerModelTester``.
    """
    def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=[2, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[8, 4, 2, 1] , UpperCamelCase__ : Tuple=[16, 32, 64, 128] , UpperCamelCase__ : Optional[int]=[1, 4, 8, 16] , UpperCamelCase__ : Any=[1, 2, 4, 8] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=None , ):
        A__ : Tuple =parent
        A__ : List[Any] =batch_size
        A__ : List[Any] =image_size
        A__ : Union[str, Any] =num_channels
        A__ : Optional[int] =num_encoder_blocks
        A__ : Any =sr_ratios
        A__ : Any =depths
        A__ : List[Any] =hidden_sizes
        A__ : List[Any] =downsampling_rates
        A__ : List[str] =num_attention_heads
        A__ : int =is_training
        A__ : List[Any] =use_labels
        A__ : Any =hidden_act
        A__ : Dict =hidden_dropout_prob
        A__ : int =attention_probs_dropout_prob
        A__ : List[Any] =initializer_range
        A__ : Tuple =num_labels
        A__ : List[Any] =scope
    def _UpperCAmelCase ( self : Optional[int] ):
        # Random pixel values (and optional per-pixel labels) plus a config.
        A__ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ : Any =None
        if self.use_labels:
            A__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        A__ : List[Any] =self.get_config()
        return config, pixel_values, labels
    def _UpperCAmelCase ( self : Tuple ):
        # Tiny SegformerConfig derived from the tester's dimensions.
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
        # Base model: last hidden state is downsampled by the final rate * 2.
        A__ : Any =SegformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : Dict =model(UpperCamelCase__ )
        A__ : Optional[int] =self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
        # Semantic segmentation: logits are at 1/4 resolution; loss must be > 0.
        A__ : str =self.num_labels
        A__ : Optional[Any] =SegformerForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : Optional[Any] =model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        A__ : List[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )
    def _UpperCAmelCase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
        # Binary segmentation (single label) with random 0/1 label maps.
        A__ : Tuple =1
        A__ : Tuple =SegformerForSemanticSegmentation(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : List[str] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase__ )
        A__ : Dict =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertGreater(result.loss , 0.0 )
    def _UpperCAmelCase ( self : str ):
        # Pack (config, inputs) in the shape the common test mixin expects.
        A__ : Union[str, Any] =self.prepare_config_and_inputs()
        A__ , A__ , A__ : Tuple =config_and_inputs
        A__ : Tuple ={"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    """Common + pipeline test suite for Segformer models.

    NOTE(review): identifiers were machine-mangled — all class attributes are
    named ``__magic_name__`` (originally ``all_model_classes``,
    ``pipeline_model_mapping``, ``fx_compatible``, etc.) and helper classes
    are referenced by their pre-mangling names; code preserved byte-for-byte.
    """
    # Model classes exercised by the shared ModelTesterMixin tests.
    __magic_name__ : Dict = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    __magic_name__ : Optional[int] = (
        {
            """feature-extraction""": SegformerModel,
            """image-classification""": SegformerForImageClassification,
            """image-segmentation""": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common test mixin (names mangled).
    __magic_name__ : Dict = True
    __magic_name__ : List[str] = False
    __magic_name__ : Optional[Any] = False
    __magic_name__ : str = False
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # setUp: build the model/config testers used by every test below.
        A__ : Union[str, Any] =SegformerModelTester(self )
        A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ )
    def _UpperCAmelCase ( self : str ):
        # Run the generic config round-trip tests.
        self.config_tester.run_common_tests()
    def _UpperCAmelCase ( self : Dict ):
        A__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def _UpperCAmelCase ( self : Tuple ):
        A__ : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ )
    def _UpperCAmelCase ( self : Union[str, Any] ):
        A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ )
    @unittest.skip("SegFormer does not use inputs_embeds" )
    def _UpperCAmelCase ( self : Dict ):
        pass
    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def _UpperCAmelCase ( self : Tuple ):
        pass
    def _UpperCAmelCase ( self : List[str] ):
        # forward() must take `pixel_values` as its first argument.
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : int =model_class(UpperCamelCase__ )
            A__ : Optional[int] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : Optional[int] =[*signature.parameters.keys()]
            A__ : List[str] =["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def _UpperCAmelCase ( self : str ):
        # Attention outputs: count, and the (reduced) sequence lengths of the
        # first and last efficient-attention blocks.
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Union[str, Any] =True
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            A__ : Union[str, Any] =False
            A__ : str =True
            A__ : Optional[int] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Any =outputs.attentions
            A__ : List[str] =sum(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A__ : Dict =True
            A__ : str =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Union[str, Any] =outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A__ : List[Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            A__ : Tuple =(self.model_tester.image_size // 32) ** 2
            A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            A__ : int =len(UpperCamelCase__ )
            # Check attention is always last and order is fine
            A__ : Optional[Any] =True
            A__ : Any =True
            A__ : Union[str, Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
            A__ : Optional[Any] =outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def _UpperCAmelCase ( self : List[Any] ):
        # Hidden-state outputs: one per encoder block; first is at 1/4 resolution.
        def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
            A__ : Optional[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Optional[Any] =outputs.hidden_states
            A__ : int =self.model_tester.num_encoder_blocks
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ : str =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def _UpperCAmelCase ( self : Optional[int] ):
        # Smoke-test one training step (forward + backward) per model class.
        if not self.model_tester.is_training:
            return
        A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : List[Any] =True
        for model_class in self.all_model_classes:
            if model_class in get_values(UpperCamelCase__ ):
                continue
            A__ : List[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.train()
            A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _UpperCAmelCase ( self : Tuple ):
        pass
    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # Loading a published checkpoint must succeed.
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def lowercase():
    """Load the COCO fixture image used by the integration tests below.

    Fixes the mangled original, which assigned the opened image to a throwaway
    name and returned undefined ``image`` (NameError).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Slow Segformer integration tests against published checkpoints.

    NOTE(review): identifiers were machine-mangled — intermediates are bound
    to ``A__`` and read back via ``UpperCamelCase__``; code preserved
    byte-for-byte.  Expected logit slices are checkpoint-specific fixtures.
    """
    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # ADE20k b0 checkpoint: check logits shape and a 3x3x3 slice.
        # only resize + normalize
        A__ : List[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : Union[str, Any] =prepare_img()
        A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : Optional[int] =torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
    @slow
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Cityscapes b1 checkpoint: same shape/slice check, looser tolerance.
        # only resize + normalize
        A__ : Dict =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : int =SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(UpperCamelCase__ )
        A__ : Tuple =prepare_img()
        A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : List[Any] =torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) )
    @slow
    def _UpperCAmelCase ( self : int ):
        # Post-processing: segmentation maps with and without target resizing.
        # only resize + normalize
        A__ : Optional[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : str =prepare_img()
        A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : Dict =model(UpperCamelCase__ )
        A__ : Any =outputs.logits.detach().cpu()
        A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
        A__ : List[str] =torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        A__ : Tuple =torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 656 | 1 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# NOTE(review): module-level flag with a mangled name; it is not read anywhere
# in the visible chunk — confirm its original purpose before removing.
__A : Optional[int] = False
class __lowerCAmelCase ( unittest.TestCase):
    """Checks DDPM and DDIM schedulers produce identical training dynamics.

    Trains the same tiny UNet (same seed, same batches) once with DDPM
    ``add_noise`` and once with DDIM, then asserts the results match.
    NOTE(review): identifiers were mangled — values bound to ``A__`` are read
    back under their original names; code preserved byte-for-byte.
    """
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Optional[int]=32 ):
        # Deterministic small UNet + SGD optimizer shared by both runs.
        set_seed(0 )
        A__ : List[str] =UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
        A__ : Optional[int] =torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer
    @slow
    def _UpperCAmelCase ( self : Any ):
        A__ : Optional[int] ="cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        A__ : Dict =DDPMScheduler(
            num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=UpperCamelCase__ , )
        A__ : str =DDIMScheduler(
            num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=UpperCamelCase__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        A__ : List[str] =[torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
        A__ : int =[torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
        A__ : Optional[Any] =[torch.randint(0 , 1000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        A__ , A__ : List[str] =self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            A__ : str =ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            A__ : Optional[Any] =model(UpperCamelCase__ , timesteps[i] ).sample
            A__ : Tuple =torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        A__ , A__ : Any =self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            A__ : Tuple =ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            A__ : Union[str, Any] =model(UpperCamelCase__ , timesteps[i] ).sample
            A__ : Optional[Any] =torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # Both runs must have produced identical noisy inputs and predictions.
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
| 656 | """simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase):
    '''Builds RobertaPreLayerNorm configs and dummy inputs for the Flax model tests below.'''
    # NOTE(review): this file has been mechanically identifier-mangled. Every
    # parameter below is named `UpperCamelCase__` (duplicate arguments are a
    # SyntaxError) and each assignment binds `A__` while the right-hand sides
    # read the original names (`parent`, `batch_size`, ...), which are never
    # bound. The original intent was `self.parent = parent` etc. — restore
    # the real names before this can run.
    def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ):
        A__ : str =parent
        A__ : List[str] =batch_size
        A__ : Any =seq_length
        A__ : List[str] =is_training
        A__ : List[Any] =use_attention_mask
        A__ : List[Any] =use_token_type_ids
        A__ : Dict =use_labels
        A__ : List[Any] =vocab_size
        A__ : Optional[int] =hidden_size
        A__ : Optional[Any] =num_hidden_layers
        A__ : str =num_attention_heads
        A__ : int =intermediate_size
        A__ : Tuple =hidden_act
        A__ : Tuple =hidden_dropout_prob
        A__ : Dict =attention_probs_dropout_prob
        A__ : Any =max_position_embeddings
        A__ : Any =type_vocab_size
        A__ : Union[str, Any] =type_sequence_label_size
        A__ : Optional[Any] =initializer_range
        A__ : int =num_choices
    def _UpperCAmelCase ( self : Tuple ):
        # Build one config plus input_ids / token_type_ids / attention_mask.
        A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : List[str] =None
        if self.use_attention_mask:
            A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
        A__ : str =None
        if self.use_token_type_ids:
            A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        # NOTE(review): `is_decoder=UpperCamelCase__` references a name that is
        # not in scope here — presumably the original passed False.
        A__ : Any =RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def _UpperCAmelCase ( self : Tuple ):
        # NOTE(review): `config_and_inputs` below is never bound (the call
        # result went to `A__`), so this method raises NameError as written.
        A__ : Dict =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : str =config_and_inputs
        A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def _UpperCAmelCase ( self : int ):
        # Decoder variant: adds encoder hidden states and encoder attention mask.
        A__ : str =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs
        A__ : Union[str, Any] =True
        A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    '''Model-suite test for the Flax RobertaPreLayerNorm variants.'''
    # NOTE(review): both class attributes are named `__magic_name__`, so the
    # second assignment overwrites the first; the originals were presumably
    # `test_head_masking` (or similar) and `all_model_classes`.
    __magic_name__ : Union[str, Any] = True
    __magic_name__ : Dict = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def _UpperCAmelCase ( self : Optional[int] ):
        # NOTE(review): `FlaxRobertaPreLayerNormModelTester` is not defined in
        # this file (the tester class above is named `__lowerCAmelCase`), and
        # the instance is bound to a throwaway local instead of
        # `self.model_tester` — obfuscation damage; confirm against upstream.
        A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        # Smoke test: each pretrained head loads from the hub and runs a
        # forward pass. NOTE(review): this def shadows the setUp-like method
        # above (same mangled name), and `UpperCamelCase__` is unbound here.
        for model_class_name in self.all_model_classes:
            A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
            A__ : Union[str, Any] =model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase):
    '''Integration tests comparing hub checkpoints against hard-coded logits/hidden states.'''
    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # Masked-LM head: check output shape and a 3x3 slice of the logits.
        # NOTE(review): `jnp.intaa` / `np.floataa` look like digit-mangled
        # `jnp.int32` / `np.float32` — confirm against the upstream test.
        A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : str =model(UpperCamelCase__ )[0]
        A__ : List[Any] =[1, 11, 50265]
        self.assertEqual(list(output.shape ) , UpperCamelCase__ )
        # compare the actual values for a slice.
        A__ : Any =np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        # Base model: check a 3x3 slice of the last hidden state.
        # NOTE(review): shadows the method above (same mangled name), and
        # `UpperCamelCase__` is unbound inside both methods.
        A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : Dict =model(UpperCamelCase__ )[0]
        # compare the actual values for a slice.
        A__ : Optional[Any] =np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 656 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Tuple = logging.get_logger(__name__)
__A : Optional[int] = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase):
    '''Configuration for a ConvNeXt-V2 style backbone (model_type "convnextv2").'''
    __magic_name__ : List[Any] = """convnextv2"""
    # NOTE(review): all keyword parameters are named `UpperCamelCase__`
    # (duplicate arguments — SyntaxError) and the body reads the original
    # names (`num_channels`, `hidden_sizes`, `depths`, ...), which are unbound.
    # The `A__` targets were originally `self.<name>` assignments; as written
    # no attribute except the mixin-provided ones is ever set.
    def __init__( self : Any , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : str=1E-12 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : int=224 , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : str , ):
        super().__init__(**UpperCamelCase__ )
        A__ : Dict =num_channels
        A__ : int =patch_size
        A__ : int =num_stages
        # Default stage widths / depths follow the ConvNeXt "tiny" layout.
        A__ : Union[str, Any] =[96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        A__ : Dict =[3, 3, 9, 3] if depths is None else depths
        A__ : List[str] =hidden_act
        A__ : Optional[Any] =initializer_range
        A__ : int =layer_norm_eps
        A__ : str =drop_path_rate
        A__ : List[str] =image_size
        A__ : List[str] =["stem"] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        # NOTE(review): the aligned out_features/out_indices pair is discarded
        # here; upstream assigns them to `self._out_features, self._out_indices`.
        A__ , A__ : List[Any] =get_aligned_output_features_output_indices(
            out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
__A : Any = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
__A : Optional[int] = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : Union[str, Any] =torch.load(UpperCamelCase , map_location="cpu" )
return sd
# NOTE(review): heavy obfuscation damage in this function.
#  * The default `rename_keys_prefix` is not defined at module scope (the
#    rename-pair list above is bound to `__A`), so importing this file raises
#    NameError at this `def` line.
#  * The position-ids tensor built below was originally inserted into `new_d`;
#    here it is bound to a discarded local.
#  * `new_d[new_key] = d[key]` and the decoder-bias insertion both degenerated
#    into dead `A__` assignments, so the returned dict is always empty.
# Restore from the upstream VisualBERT conversion script before use.
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int=rename_keys_prefix ):
    """Rename checkpoint keys (dropping detector weights) into HF VisualBERT layout."""
    A__ : List[str] =OrderedDict()
    A__ : str =torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        # Detector weights are not part of the VisualBERT model — skip them.
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        A__ : Optional[Any] =key
        # Apply every (old_prefix, new_prefix) substitution in order.
        for name_pair in rename_keys_prefix:
            A__ : int =new_key.replace(name_pair[0] , name_pair[1] )
        A__ : Dict =d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            A__ : Optional[int] =new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : List[str] ):
    """Convert an original VisualBERT checkpoint into a HF model directory.

    Args (positional): path to the original `.th` checkpoint; output folder.
    NOTE(review): mangling damage throughout — both parameters should be
    distinct names (`checkpoint_path`, `pytorch_dump_folder_path`); the
    `model_type` string chosen in each branch is bound to a discarded local;
    and `load_state_dict` / `get_new_dict` are undefined (the helpers above
    are both named `lowercase`). Restore names from the upstream script.
    """
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        A__ : Any ="pretraining"
        if "vcr" in checkpoint_path:
            A__ : Union[str, Any] ={"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            A__ : Optional[Any] ={"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            A__ : Optional[int] ={"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            A__ : List[str] ={"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            A__ : Optional[int] ={"visual_embedding_dim": 512}
            A__ : List[str] ="multichoice"
        elif "vqa_advanced" in checkpoint_path:
            A__ : Any ={"visual_embedding_dim": 2048}
            A__ : str ="vqa_advanced"
        elif "vqa" in checkpoint_path:
            A__ : Optional[int] ={"visual_embedding_dim": 2048, "num_labels": 3129}
            A__ : str ="vqa"
        elif "nlvr" in checkpoint_path:
            A__ : str ={
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            A__ : Dict ="nlvr"
    A__ : Union[str, Any] =VisualBertConfig(**UpperCamelCase )
    # Load State Dict
    A__ : int =load_state_dict(UpperCamelCase )
    A__ : Tuple =get_new_dict(UpperCamelCase , UpperCamelCase )
    # Pick the task-specific head matching the checkpoint type.
    if model_type == "pretraining":
        A__ : str =VisualBertForPreTraining(UpperCamelCase )
    elif model_type == "vqa":
        A__ : Optional[int] =VisualBertForQuestionAnswering(UpperCamelCase )
    elif model_type == "nlvr":
        A__ : Union[str, Any] =VisualBertForVisualReasoning(UpperCamelCase )
    elif model_type == "multichoice":
        A__ : Union[str, Any] =VisualBertForMultipleChoice(UpperCamelCase )
    model.load_state_dict(UpperCamelCase )
    # Save Checkpoints
    Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
    model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
__A : str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def lowercase ( vector_a : np.ndarray , vector_b : np.ndarray ):
    """Return the Euclidean distance between two equal-length vectors.

    Fix: both parameters were named `UpperCamelCase`, which is a
    duplicate-argument SyntaxError and (had it parsed) would have zipped a
    vector with itself, always yielding 0.
    """
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(vector_a , vector_b ) ) )
def lowercase ( UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
"""simple docstring"""
if dataset.ndim != value_array.ndim:
A__ : str =(
"Wrong input data's dimensions... "
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(UpperCamelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
A__ : Union[str, Any] =(
"Wrong input data's shape... "
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(UpperCamelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
A__ : Any =(
"Input data have different datatype... "
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(UpperCamelCase )
A__ : Dict =[]
for value in value_array:
A__ : Optional[Any] =euclidean(UpperCamelCase , dataset[0] )
A__ : List[Any] =dataset[0].tolist()
for dataset_value in dataset[1:]:
A__ : Dict =euclidean(UpperCamelCase , UpperCamelCase )
if dist > temp_dist:
A__ : Dict =temp_dist
A__ : Optional[int] =dataset_value.tolist()
answer.append([vector, dist] )
return answer
def lowercase ( vector_a : np.ndarray , vector_b : np.ndarray ):
    """Return the cosine similarity of two vectors: dot(a, b) / (|a| * |b|).

    Fix: both parameters were named `UpperCamelCase` — a duplicate-argument
    SyntaxError; renamed to distinct identifiers.
    """
    return np.dot(vector_a , vector_b ) / (norm(vector_a ) * norm(vector_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | """simple docstring"""
__A : Union[str, Any] = {str(digit): digit**5 for digit in range(10)}
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) )
def lowercase ( ):
"""simple docstring"""
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(UpperCamelCase ) )
if __name__ == "__main__":
print(solution())
| 656 | 1 |
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase ( UpperCamelCase : Any ):
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
    import torch
    import torch.nn as nn
    class __lowerCAmelCase ( nn.Module):
        '''Wraps an nn.Module with a low-rank (LoRA-style) trainable adapter:
        forward(x) = wrapped(x) + down_proj(up_proj(x)).'''
        # NOTE(review): both __init__ parameters after `self` are named
        # `UpperCamelCase__` (duplicate argument — SyntaxError); upstream they
        # are `module` and `rank`, and the adapter bottleneck width is `rank`.
        def __init__( self : str , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int ):
            super().__init__()
            A__ : Optional[Any] =module
            A__ : Union[str, Any] =nn.Sequential(
                nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__ ) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__ ) , )
            # Kaiming-style std for the down-projection; up-projection starts
            # at zero so the adapter is initially a no-op.
            A__ : Any =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__ )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )
        def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Optional[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ):
            # Wrapped module's output plus the adapter's residual contribution.
            return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) + self.adapter(UpperCamelCase__ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase):
    '''Shared constants and tokenizer setup for the 4-bit bitsandbytes tests.'''
    # NOTE(review): every class attribute was mangled to `__magic_name__`, so
    # later assignments clobber earlier ones, and `EXPECTED_OUTPUTS.add(...)`
    # below raises NameError at class-creation time (the set is bound to
    # `__magic_name__`). Upstream names: model_name,
    # EXPECTED_RELATIVE_DIFFERENCE, input_text, EXPECTED_OUTPUTS, MAX_NEW_TOKENS.
    __magic_name__ : str = """bigscience/bloom-1b7"""
    # Constant values
    __magic_name__ : List[str] = 2.109_6595_5269_2574
    __magic_name__ : Tuple = """Hello my name is"""
    __magic_name__ : int = set()
    EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""")
    EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""")
    EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""")
    __magic_name__ : int = 10
    def _UpperCAmelCase ( self : int ):
        # Models and tokenizer
        # NOTE(review): the tokenizer is bound to a throwaway local instead of
        # `self.tokenizer`, which the subclasses below rely on.
        A__ : str =AutoTokenizer.from_pretrained(self.model_name )
class __lowerCAmelCase ( _UpperCamelCase):
    '''Core 4-bit quantization tests: memory footprint, generation quality,
    config round-tripping, and unsupported-cast errors. All `A__` bindings
    below were originally `self.<attr>` / named locals (obfuscation damage);
    restore names before running.'''
    def _UpperCAmelCase ( self : Any ):
        super().setUp()
        # Models and tokenizer
        A__ : str =AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="auto" )
        A__ : Any =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # tearDown: free GPU memory between tests.
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase ( self : int ):
        # The quantized model must expose a serializable quantization_config.
        A__ : Optional[int] =self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase__ , "quantization_config" ) )
        A__ : List[Any] =config.to_dict()
        A__ : Dict =config.to_diff_dict()
        A__ : Dict =config.to_json_string()
    def _UpperCAmelCase ( self : str ):
        # 4-bit weights should shrink the footprint by the expected ratio and
        # be stored as bitsandbytes Params4bit.
        from bitsandbytes.nn import Paramsabit
        A__ : Union[str, Any] =self.model_fpaa.get_memory_footprint()
        A__ : int =self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        A__ : Optional[int] =get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
    def _UpperCAmelCase ( self : List[Any] ):
        # Linear layers (except kept-in-fp32 modules and lm_head) pack 4-bit
        # parameters into uint8 storage.
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase__ , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )
    def _UpperCAmelCase ( self : int ):
        # Quantized generation should still produce one of the known outputs.
        A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="pt" )
        A__ : Any =self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS )
    def _UpperCAmelCase ( self : List[str] ):
        # Loading through an explicit BitsAndBytesConfig must match.
        # NOTE(review): the bare `A__ = True` was presumably
        # `config.load_in_4bit = True` upstream — confirm.
        A__ : List[Any] =BitsAndBytesConfig()
        A__ : Union[str, Any] =True
        A__ : List[str] =AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCamelCase__ , device_map="auto" )
        A__ : Any =self.tokenizer(self.input_text , return_tensors="pt" )
        A__ : Optional[Any] =model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS )
    def _UpperCAmelCase ( self : Optional[int] ):
        # Serializing a 4-bit model is unsupported and must raise.
        with self.assertRaises(UpperCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase__ )
    def _UpperCAmelCase ( self : List[Any] ):
        # Passing both a quantization_config and load_in_4bit must raise.
        A__ : int =BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase__ ):
            A__ : int =AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
    def _UpperCAmelCase ( self : str ):
        # Casting/moving a quantized model is forbidden; the fp16 model is
        # unaffected.
        with self.assertRaises(UpperCamelCase__ ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(UpperCamelCase__ ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(UpperCamelCase__ ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(UpperCamelCase__ ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase__ ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        A__ : List[Any] =self.tokenizer(self.input_text , return_tensors="pt" )
        A__ : Union[str, Any] =self.model_fpaa.to(torch.floataa )
        A__ : Union[str, Any] =self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        A__ : Optional[Any] =self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        A__ : List[str] =self.model_fpaa.half()
        # Check this does not throw an error
        A__ : Dict =self.model_fpaa.float()
    def _UpperCAmelCase ( self : Optional[Any] ):
        # Keep-in-fp32 modules (T5's wo) must stay float32 under 4-bit load.
        A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=UpperCamelCase__ , device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase):
    '''T5-specific 4-bit tests: loading with and without the keep-in-fp32
    module list, and verifying the decoder uses bitsandbytes Linear4bit.'''
    @classmethod
    def _UpperCAmelCase ( cls : Dict ):
        # NOTE(review): these were originally `cls.model_name`,
        # `cls.dense_act_model_name`, `cls.tokenizer`, `cls.input_text`; the
        # `A__` bindings discard them, so the test methods' `self.*` reads fail.
        A__ : Optional[int] ="t5-small"
        A__ : Optional[int] ="google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
        A__ : Any =AutoTokenizer.from_pretrained(cls.model_name )
        A__ : Optional[int] ="Translate in German: Hello, my dog is cute"
    def _UpperCAmelCase ( self : Dict ):
        # tearDown: free GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase ( self : Tuple ):
        # Load with the keep-in-fp32 list disabled, then restore it.
        from transformers import TaForConditionalGeneration
        A__ : Union[str, Any] =TaForConditionalGeneration._keep_in_fpaa_modules
        A__ : Dict =None
        # test with `t5-small`
        A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
        A__ : str =self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        A__ : Any =model.generate(**UpperCamelCase__ )
        # test with `flan-t5-small`
        A__ : str =TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
        A__ : Optional[Any] =self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        A__ : List[Any] =model.generate(**UpperCamelCase__ )
        A__ : Any =modules
    def _UpperCAmelCase ( self : Tuple ):
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        A__ : Dict =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        A__ : Any =model.generate(**UpperCamelCase__ )
        # test with `flan-t5-small`
        A__ : List[Any] =TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
        A__ : List[str] =self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        A__ : int =model.generate(**UpperCamelCase__ )
class __lowerCAmelCase ( _UpperCamelCase):
    '''Checks that 4-bit loading works for several head types and that only
    quantized layers use Params4bit (heads stay nn.Parameter).'''
    def _UpperCAmelCase ( self : Optional[int] ):
        super().setUp()
        # model_name
        # NOTE(review): all `A__` bindings below were `self.<attr>` upstream
        # (model_name, seq_to_seq_name, base_model, sequence_model,
        # model_abit, seq_to_seq_model); the tearDown/test methods read them.
        A__ : List[Any] ="bigscience/bloom-560m"
        A__ : int ="t5-small"
        # Different types of model
        A__ : Optional[Any] =AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
        # Sequence classification model
        A__ : Optional[int] =AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
        # CausalLM model
        A__ : Any =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
        # Seq2seq model
        A__ : Optional[int] =AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="auto" )
    def _UpperCAmelCase ( self : int ):
        # tearDown: free GPU memory between tests.
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase ( self : List[Any] ):
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowerCAmelCase ( _UpperCamelCase):
    '''Runs 4-bit quantized generation through the `pipeline` API.'''
    def _UpperCAmelCase ( self : str ):
        super().setUp()
    def _UpperCAmelCase ( self : Optional[int] ):
        # tearDown. NOTE(review): `self.pipe` is never assigned anywhere
        # visible (the pipeline below is bound to a local) — obfuscation
        # damage; upstream stores it on self.
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase ( self : str ):
        A__ : Union[str, Any] =pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        A__ : Optional[Any] =self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowerCAmelCase ( _UpperCamelCase):
    '''4-bit loading with a balanced device map across two GPUs.'''
    def _UpperCAmelCase ( self : List[Any] ):
        super().setUp()
    def _UpperCAmelCase ( self : List[Any] ):
        A__ : List[str] =AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase__ , device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="pt" )
        # Second real batch
        A__ : List[str] =model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS )
class __lowerCAmelCase ( _UpperCamelCase):
    '''Trains LoRA adapters on top of a frozen 4-bit base model and checks
    that only adapter weights receive gradients.'''
    def _UpperCAmelCase ( self : Tuple ):
        # NOTE(review): the model name is bound to a discarded local; upstream
        # sets `self.model_name` BEFORE calling super().setUp().
        A__ : Optional[int] ="facebook/opt-350m"
        super().setUp()
    def _UpperCAmelCase ( self : List[Any] ):
        # Training through 4-bit layers needs bitsandbytes >= 0.37.0.
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return
        # Step 1: freeze all parameters
        A__ : Tuple =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            A__ : Any =False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                A__ : int =param.data.to(torch.floataa )
        # Step 2: add adapters
        # NOTE(review): the LoRALayer wrappers below were originally assigned
        # back onto the module (module.q_proj = LoRALayer(...)); the `A__`
        # bindings discard them.
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__ ) ):
                A__ : Optional[Any] =LoRALayer(module.q_proj , rank=16 )
                A__ : List[Any] =LoRALayer(module.k_proj , rank=16 )
                A__ : Optional[Any] =LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        A__ : Dict =self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            A__ : List[Any] =model.forward(**UpperCamelCase__ )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(UpperCamelCase__ , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class __lowerCAmelCase ( _UpperCamelCase):
    '''Re-runs the base 4-bit suite against GPT-2 XL with its own expected
    compression ratio. NOTE(review): both attributes are mangled to
    `__magic_name__` (upstream: model_name, EXPECTED_RELATIVE_DIFFERENCE), so
    the second assignment overwrites the first.'''
    __magic_name__ : List[str] = """gpt2-xl"""
    __magic_name__ : List[Any] = 3.3191_8548_5415_2187
| 656 | """simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__A : Optional[Any] = logging.get_logger(__name__)
# General docstring
__A : str = "PoolFormerConfig"
# Base docstring
__A : Optional[Any] = "sail/poolformer_s12"
__A : List[Any] = [1, 512, 7, 7]
# Image classification docstring
__A : List[str] = "sail/poolformer_s12"
__A : Tuple = "tabby, tabby cat"
__A : Tuple = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowercase ( UpperCamelCase : Any , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = False ):
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
A__ : Tuple =1 - drop_prob
A__ : List[str] =(input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
A__ : Any =keep_prob + torch.rand(UpperCamelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
A__ : Optional[int] =input.div(UpperCamelCase ) * random_tensor
return output
class __lowerCAmelCase ( nn.Module):
    '''Module wrapper around the stochastic-depth function above.
    NOTE(review): `drop_path` in forward is undefined — the function above
    was mangled to `lowercase`.'''
    def __init__( self : Optional[int] , UpperCamelCase__ : Optional[float] = None ):
        super().__init__()
        A__ : Optional[int] =drop_prob
    def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : torch.Tensor ):
        # forward: apply stochastic depth with this module's drop probability.
        return drop_path(UpperCamelCase__ , self.drop_prob , self.training )
    def _UpperCAmelCase ( self : List[str] ):
        # extra_repr for nicer printouts. NOTE(review): shadows the forward
        # above (same mangled name).
        return "p={}".format(self.drop_prob )
class __lowerCAmelCase ( nn.Module):
    '''Patch embedding: a strided convolution followed by an optional norm.
    NOTE(review): `nn.Convad` looks like digit-mangled `nn.Conv2d`, and the
    six __init__ parameters share the name `UpperCamelCase__` (duplicate
    argument — SyntaxError); upstream they are hidden_size, num_channels,
    patch_size, stride, padding, norm_layer.'''
    def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ):
        super().__init__()
        # Normalize scalar size arguments to (h, w) pairs.
        A__ : Optional[int] =patch_size if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (patch_size, patch_size)
        A__ : Optional[int] =stride if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (stride, stride)
        A__ : int =padding if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (padding, padding)
        A__ : Any =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , kernel_size=UpperCamelCase__ , stride=UpperCamelCase__ , padding=UpperCamelCase__ )
        A__ : Any =norm_layer(UpperCamelCase__ ) if norm_layer else nn.Identity()
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : str ):
        # forward: project pixel values to patch embeddings, then normalize.
        A__ : List[str] =self.projection(UpperCamelCase__ )
        A__ : Any =self.norm(UpperCamelCase__ )
        return embeddings
class __lowerCAmelCase ( nn.GroupNorm):
    '''GroupNorm with a single group — equivalent to LayerNorm over channels.'''
    def __init__( self : Tuple , UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ):
        # num_groups is fixed at 1; the positional argument is num_channels.
        super().__init__(1 , UpperCamelCase__ , **UpperCamelCase__ )
class __lowerCAmelCase ( nn.Module):
    '''PoolFormer token mixer: average pooling minus the identity.
    NOTE(review): `nn.AvgPoolad` looks like digit-mangled `nn.AvgPool2d`, and
    `pool_size` in the padding expression is unbound (the parameter is
    `UpperCamelCase__`).'''
    def __init__( self : Tuple , UpperCamelCase__ : Optional[int] ):
        super().__init__()
        A__ : Any =nn.AvgPoolad(UpperCamelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCamelCase__ )
    def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[str] ):
        # forward: subtract the input so only the "mixing" component remains.
        return self.pool(UpperCamelCase__ ) - hidden_states
class __lowerCAmelCase ( nn.Module):
    '''PoolFormer MLP block: 1x1 conv -> activation -> drop -> 1x1 conv -> drop.
    NOTE(review): the four __init__ parameters share the name
    `UpperCamelCase__` (duplicate argument — SyntaxError); upstream they are
    config, dropout_prob, hidden_size, intermediate_size, and
    `PoolFormerDropPath` is undefined here (all classes in this file were
    renamed `__lowerCAmelCase`).'''
    def __init__( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ):
        super().__init__()
        A__ : List[Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
        A__ : Union[str, Any] =nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
        A__ : Dict =PoolFormerDropPath(UpperCamelCase__ )
        # Accept either an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act , UpperCamelCase__ ):
            A__ : Tuple =ACTaFN[config.hidden_act]
        else:
            A__ : Optional[Any] =config.hidden_act
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Dict ):
        # forward: expand -> activate -> drop -> project back -> drop.
        A__ : Optional[Any] =self.conva(UpperCamelCase__ )
        A__ : List[str] =self.act_fn(UpperCamelCase__ )
        A__ : List[str] =self.drop(UpperCamelCase__ )
        A__ : Optional[int] =self.conva(UpperCamelCase__ )
        A__ : Optional[Any] =self.drop(UpperCamelCase__ )
        return hidden_states
class __lowerCAmelCase ( nn.Module):
    '''One PoolFormer block: pooling mixer + MLP, each behind a pre-norm and a
    residual connection, optionally modulated by learnable layer scales.
    NOTE(review): the six __init__ parameters share the name `UpperCamelCase__`
    (duplicate argument — SyntaxError), and `PoolFormerPooling` /
    `PoolFormerOutput` / `PoolFormerGroupNorm` / `PoolFormerDropPath` /
    `drop_path` are all undefined under the mangled names in this file.'''
    def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
        super().__init__()
        A__ : Optional[int] =PoolFormerPooling(UpperCamelCase__ )
        A__ : List[str] =PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        A__ : int =PoolFormerGroupNorm(UpperCamelCase__ )
        A__ : int =PoolFormerGroupNorm(UpperCamelCase__ )
        # Useful for training neural nets
        A__ : Tuple =PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity()
        A__ : Optional[Any] =config.use_layer_scale
        if config.use_layer_scale:
            # Per-channel learnable scales for both residual branches.
            A__ : List[str] =nn.Parameter(
                config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )
            A__ : List[Any] =nn.Parameter(
                config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )
    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
        if self.use_layer_scale:
            # Branch 1: pooling mixer scaled by layer_scale_1.
            A__ : Optional[int] =self.pooling(self.before_norm(UpperCamelCase__ ) )
            A__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            A__ : Union[str, Any] =hidden_states + self.drop_path(UpperCamelCase__ )
            A__ : Tuple =()
            # Branch 2: MLP scaled by layer_scale_2.
            A__ : List[str] =self.output(self.after_norm(UpperCamelCase__ ) )
            A__ : Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            A__ : str =hidden_states + self.drop_path(UpperCamelCase__ )
            A__ : List[Any] =(output,) + outputs
            return outputs
        else:
            A__ : Tuple =self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) )
            # First residual connection
            A__ : Optional[Any] =pooling_output + hidden_states
            A__ : Tuple =()
            # Second residual connection inside the PoolFormerOutput block
            A__ : List[str] =self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) )
            A__ : Any =hidden_states + layer_output
            A__ : Tuple =(output,) + outputs
            return outputs
class __lowerCAmelCase ( nn.Module):
    """Stack of patch-embedding stages, each followed by its PoolFormer layers.

    NOTE(review): the previous revision assigned every local to a throwaway name
    while later lines read ``dpr``/``embeddings``/``blocks``/``embedding_layer``
    etc. (NameError); the original wiring is restored.
    """

    def __init__( self , config ):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: linearly increasing drop-path rate per layer
        dpr = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings, one per encoder stage
        embeddings = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        self.patch_embeddings = nn.ModuleList(embeddings )
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        config , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(layers ) )
        self.block = nn.ModuleList(blocks )

    def forward( self , pixel_values , output_hidden_states=False , return_dict=True ):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            embedding_layer , block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states )
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer ):
                layer_outputs = blk(hidden_states )
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states , hidden_states=all_hidden_states )
class __lowerCAmelCase ( _UpperCamelCase):
    """Base class handling weight initialization and gradient checkpointing.

    NOTE(review): both hook methods previously shared the name
    ``_UpperCAmelCase`` (the second shadowed the first) and their bodies read
    undefined ``module``/``value``; the HF-contract names are restored so the
    base ``PreTrainedModel`` machinery can find them.
    """

    config_class = PoolFormerConfig
    base_model_prefix = """poolformer"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        """Truncated-normal init for linear/conv weights, unit LayerNorm."""
        if isinstance(module , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )

    def _set_gradient_checkpointing( self , module , value=False ):
        """Toggle gradient checkpointing on encoder submodules."""
        if isinstance(module , PoolFormerEncoder ):
            module.gradient_checkpointing = value
__A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    """PoolFormer backbone (encoder only, no task head).

    NOTE(review): ``forward`` previously had three parameters with the same
    name (SyntaxError) and both it and the embeddings accessor were named
    ``_UpperCAmelCase`` (shadowing); standard names restored.
    """

    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.encoder = PoolFormerEncoder(config )
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings( self ):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values: Optional[torch.FloatTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        encoder_outputs = self.encoder(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output , hidden_states=encoder_outputs.hidden_states , )
class __lowerCAmelCase ( nn.Module):
    """Simple dense projection over the final hidden state.

    NOTE(review): ``self.dense`` was previously assigned to a throwaway local,
    so ``forward`` raised AttributeError; fixed.
    """

    def __init__( self , config ):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size , config.hidden_size )

    def forward( self , hidden_states ):
        output = self.dense(hidden_states )
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    """PoolFormer with a linear image-classification head (mean-pooled features).

    NOTE(review): the previous ``forward`` had four identically named
    parameters (SyntaxError) and read undefined ``outputs``/``logits``/``loss``;
    reconstructed below.
    """

    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config )
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head (identity when num_labels == 0)
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values: Optional[torch.FloatTensor] = None , labels: Optional[torch.LongTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = outputs[0]
        # Global average pool over the spatial dimensions before classifying.
        logits = self.classifier(self.norm(sequence_output ).mean([-2, -1] ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                # Infer the problem type once from num_labels and label dtype.
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 656 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( _UpperCamelCase):
    """Configuration class for Megatron-BERT style models.

    NOTE(review): the previous revision gave every ``__init__`` parameter the
    same name (a SyntaxError) and wrote each value to a throwaway local instead
    of an attribute; parameter names reconstructed from the attribute reads.
    """

    model_type = """megatron-bert"""

    def __init__( self , vocab_size=29056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 656 | """simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
__magic_name__ : int = IFInpaintingSuperResolutionPipeline
__magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
__magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _UpperCAmelCase ( self : Union[str, Any] ):
return self._get_superresolution_dummy_components()
def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ):
if str(UpperCamelCase__ ).startswith("mps" ):
A__ : Any =torch.manual_seed(UpperCamelCase__ )
else:
A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A__ : List[str] ={
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCAmelCase ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _UpperCAmelCase ( self : int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _UpperCAmelCase ( self : Tuple ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _UpperCAmelCase ( self : str ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _UpperCAmelCase ( self : Dict ):
self._test_save_load_local()
def _UpperCAmelCase ( self : Optional[int] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 656 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( dataset, expected_features ):
    """Shared assertions for a 4-row, 3-column Dataset loaded from parquet.

    Fixes: both parameters previously shared the name ``snake_case`` (SyntaxError).
    """
    assert isinstance(dataset, Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( keep_in_memory, parquet_path, tmp_path ):
    """Reading a parquet file into a Dataset honors the keep_in_memory flag.

    Fixes: all three parameters previously shared one name (SyntaxError).
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset, expected_features )
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def __lowercase ( features, parquet_path, tmp_path ):
    """Explicit features passed to the reader override the inferred schema.

    Fixes: duplicated parameter names (SyntaxError) and broken local wiring.
    """
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset, expected_features )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( split, parquet_path, tmp_path ):
    """The split argument is propagated to the resulting Dataset.

    Fixes: duplicated parameter names (SyntaxError).
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split ).read()
    _check_parquet_dataset(dataset, expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( path_type, parquet_path, tmp_path ):
    """The reader accepts both a single path and a list of paths.

    Fixes: duplicated parameter names (SyntaxError) and undefined locals.
    """
    if issubclass(path_type, str ):
        path = parquet_path
    elif issubclass(path_type, list ):
        path = [parquet_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset, expected_features )
def __lowercase ( dataset_dict, expected_features, splits=("train",) ):
    """Shared assertions for a DatasetDict loaded from parquet.

    Fixes: duplicated parameter names (SyntaxError).
    """
    assert isinstance(dataset_dict, DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( keep_in_memory, parquet_path, tmp_path ):
    """Reading a parquet split map into a DatasetDict honors keep_in_memory.

    Fixes: duplicated parameter names (SyntaxError).
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {'''train''': parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset, expected_features )
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def __lowercase ( features, parquet_path, tmp_path ):
    """Explicit features override the inferred schema for DatasetDict reads.

    Fixes: duplicated parameter names (SyntaxError) and broken local wiring.
    """
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({'''train''': parquet_path}, features=features, cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset, expected_features )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( split, parquet_path, tmp_path ):
    """Each entry of the split->path mapping becomes a split of the DatasetDict.

    Fixes: duplicated parameter names (SyntaxError) and undefined locals.
    """
    if split:
        path = {split: parquet_path}
    else:
        split = '''train'''
        path = {'''train''': parquet_path, '''test''': parquet_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( dataset, tmp_path ):
    """Round-trip: writing a Dataset to parquet preserves the Arrow table.

    Fixes: both parameters previously shared one name (SyntaxError).
    """
    writer = ParquetDatasetWriter(dataset, tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / '''foo.parquet''' )
    output_table = pf.read()
    assert dataset.data.table == output_table
def __lowercase ( shared_datadir, tmp_path ):
    """Image features survive a parquet round trip (eager and streaming reads).

    Fixes: duplicated parameter names (SyntaxError) and undefined locals.
    """
    image_path = str(shared_datadir / '''test_image_rgb.jpg''' )
    data = {'''image''': [image_path]}
    features = Features({'''image''': Image()} )
    dataset = Dataset.from_dict(data, features=features )
    writer = ParquetDatasetWriter(dataset, tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    '''feature, expected''', [
        (Features({'''foo''': Value('''int32''' )} ), None),
        (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ], )
def __lowercase ( feature, expected ):
    """get_writer_batch_size picks the row-group size matching the feature types.

    Fixes: both parameters previously shared one name (SyntaxError).
    """
    assert get_writer_batch_size(feature ) == expected
| 0 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# NOTE(review): each optional-dependency branch previously rebound the single
# name ``__A`` to a plain list, discarding the import map, and the final
# ``_LazyModule(...)`` call referenced ``_import_structure`` which was never
# defined (NameError on import). The map is now built in ``_import_structure``
# and extended per available backend.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Vision-only components (image processor).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

# PyTorch modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

# TensorFlow modeling classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 0 |
def _A ( _lowercase = 10_00 ) -> int:
    """Return the sum of all natural numbers below ``_lowercase`` that are
    multiples of 3 or 5 (Project Euler problem 1).

    Fixes: the previous ``elif a % 15 == 0: result -= a`` branch was
    unreachable — any multiple of 15 is a multiple of 3 and is caught by the
    first condition — so it is removed.
    """
    return sum(a for a in range(_lowercase) if a % 3 == 0 or a % 5 == 0)


# Backwards-compatible alias: the CLI guard below called `solution()`, which
# was previously undefined (NameError when run as a script).
solution = _A


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 1 | """simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule ( scheduler , num_steps=10 ):
    """Step *scheduler* ``num_steps`` times and record the first lr at each step.

    Fixes: both parameters previously shared the name ``UpperCamelCase``
    (SyntaxError) and the body read an undefined ``scheduler``; also renamed to
    the name the test class below actually calls.
    """
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule ( scheduler , num_steps=10 ):
    """Like ``unwrap_schedule`` but save/reload the scheduler state halfway
    through, to check that serialization does not change the schedule.

    Fixes: duplicated parameter names (SyntaxError) and undefined locals;
    renamed to the name the test class below actually calls.
    """
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Unit tests for the AdamW and Adafactor optimizers.

    NOTE(review): all three methods previously shared the name
    ``_UpperCAmelCase`` (shadowing each other) while the bodies called
    ``self.assertListAlmostEqual`` and read undefined ``w``/``target``/
    ``criterion``; names restored below.
    """

    def assertListAlmostEqual( self , list1 , list2 , tol ):
        """Element-wise almost-equality for two equal-length lists."""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )

    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )

    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Smoke tests for every learning-rate schedule factory.

    NOTE(review): the three class attributes previously all shared
    ``__magic_name__`` while the body reads ``self.optimizer``/``self.num_steps``,
    and the locals in ``test_schedulers`` were undefined; restored below.
    """

    m = nn.Linear(50 , 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )

    def test_schedulers( self ):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=F'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper :
    """Picklable wrapper around a schedule lambda (plain lambdas cannot be pickled).

    NOTE(review): renamed from the obfuscated ``__lowerCAmelCase`` to match the
    ``LambdaScheduleWrapper.wrap_scheduler`` call site above; ``wrap_scheduler``
    previously read an undefined ``scheduler`` and discarded its result.
    """

    def __init__( self , fn ):
        self.fn = fn

    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )

    @classmethod
    def wrap_scheduler( cls , scheduler ):
        """Wrap every lr lambda of *scheduler* in place so it can be pickled."""
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 656 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image ( image: Union[List, PIL.Image.Image, torch.Tensor] ) -> torch.Tensor:
    """Convert a PIL image / list of images / tensor into a [-1, 1] NCHW tensor.

    NOTE(review): renamed to match the ``_preprocess_image`` call site in the
    pipeline below; the previous revision read undefined ``w``/``h`` (NameError
    in the PIL branch) and passed the image itself as the warning category.
    """
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.floataa ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        # Map [0, 1] -> [-1, 1] as expected by the UNet.
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask ( mask: Union[List, PIL.Image.Image, torch.Tensor] ) -> torch.Tensor:
    """Convert a PIL mask / list of masks / tensor into a binarized NCHW tensor.

    NOTE(review): renamed to match the ``_preprocess_mask`` call site in the
    pipeline below; the previous revision read undefined ``w``/``h`` and
    assigned the binarization results to throwaway locals instead of the mask.
    """
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.floataa ) / 255.0
        # Binarize: everything below 0.5 is "keep", everything else is "inpaint".
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class lowerCamelCase__ ( _A):
    """RePaint inpainting pipeline: denoises Gaussian noise with a UNet while
    repeatedly re-injecting the known (unmasked) region via a RePaintScheduler.

    NOTE(review): this block was mechanically obfuscated — every assignment
    target was replaced by ``_A`` and every ``__call__`` parameter shares the
    name ``__lowerCAmelCase`` (a SyntaxError). The comments below describe the
    intended variables (original_image, mask_image, batch_size, image, t_last,
    eta, generator, …) inferred from later reads; confirm against upstream
    diffusers before relying on them.
    """

    # Registered modules (original names presumably `unet` and `scheduler`).
    a__ : UNetaDModel
    a__ : RePaintScheduler

    def __init__( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ) -> Optional[Any]:
        # Store the UNet and RePaint scheduler on the pipeline.
        super().__init__()
        self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )

    @torch.no_grad()
    def __call__( self : Optional[int] , __lowerCAmelCase : Union[torch.Tensor, PIL.Image.Image] , __lowerCAmelCase : Union[torch.Tensor, PIL.Image.Image] , __lowerCAmelCase : int = 2_50 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : int = 10 , __lowerCAmelCase : int = 10 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Preprocess inputs and move them to the pipeline device/dtype.
        _A = image
        _A = _preprocess_image(__lowerCAmelCase )  # NOTE(review): `_preprocess_image` is not defined under this name in this file
        _A = original_image.to(device=self.device , dtype=self.unet.dtype )
        _A = _preprocess_mask(__lowerCAmelCase )  # NOTE(review): same — mask preprocessor name is broken
        _A = mask_image.to(device=self.device , dtype=self.unet.dtype )
        _A = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        _A = original_image.shape
        _A = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.device )
        _A = eta
        # t_last starts one step past the first timestep so the first iteration denoises.
        _A = self.scheduler.timesteps[0] + 1
        _A = generator[0] if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                _A = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
                # compute previous image: x_t -> x_t-1
                _A = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                _A = self.scheduler.undo_step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            _A = t
        # Rescale from [-1, 1] to [0, 1] and convert NCHW -> NHWC numpy.
        _A = (image / 2 + 0.5).clamp(0 , 1 )
        _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _A = self.numpy_to_pil(__lowerCAmelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__lowerCAmelCase )
| 2 | """simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
# NOTE(review): every constant below was obfuscated to the name ``__A``, so each
# assignment shadows the previous one; the bodies later reference the intended
# names (MAPPING_SPEECH_ENCODER_PRENET, MAPPING_ENCODER, IGNORE_KEYS, ...),
# which are therefore unbound. The group comments give the presumed originals.

# Logger for the SpeechT5 fairseq -> transformers conversion script.
__A : List[Any] = logging.get_logger("transformers.models.speecht5")

# fairseq -> transformers key renames: speech-encoder prenet.
__A : Optional[Any] = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
# Text-encoder prenet.
__A : Optional[int] = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
# Speech-decoder prenet.
__A : List[str] = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
# Speech-decoder postnet (feat/prob heads plus 5 conv+batch-norm layers).
__A : List[Any] = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
# Text-decoder prenet.
__A : Union[str, Any] = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
# Text-decoder postnet (LM head).
__A : Any = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
# Shared transformer encoder; "*" is a per-layer wildcard expanded at load time.
__A : Union[str, Any] = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
# Shared transformer decoder.
__A : Optional[int] = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task combined mappings (presumably MAPPING_S2T / MAPPING_T2S / MAPPING_S2S).
__A : Union[str, Any] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
__A : Optional[Any] = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Optional[int] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
# Keys kept at the top level (presumably TOP_LEVEL_KEYS) and keys to skip.
__A : int = []
__A : int = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
# Per-task ignore lists: components that do not exist for that task.
__A : Optional[Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
__A : Tuple = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
__A : Union[str, Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def lowercase ( UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : int ):
    """Walk ``key`` (a dotted attribute path) into the HF model and assign
    ``value`` to the slot selected by ``weight_type``, with a shape check.

    NOTE(review): obfuscation damage — all parameters share one name (a
    SyntaxError) and assignment targets were collapsed to ``A__``. The presumed
    originals are (hf_model_pointer, key, value, full_name, weight_type) and
    targets ``hf_pointer`` / ``hf_shape`` / ``hf_pointer.<weight_type>.data``;
    confirm against the upstream conversion script before use.
    """
    # Descend one attribute per path segment (target was presumably hf_pointer).
    for attribute in key.split("." ):
        A__ : Dict =getattr(UpperCamelCase , UpperCamelCase )
    # Shape of the destination parameter (whole module if weight_type is None).
    if weight_type is not None:
        A__ : Union[str, Any] =getattr(UpperCamelCase , UpperCamelCase ).shape
    else:
        A__ : Tuple =hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    # Dispatch on the parameter kind being copied (all branches assign `value`).
    if weight_type == "weight":
        A__ : Any =value
    elif weight_type == "weight_g":
        A__ : Any =value
    elif weight_type == "weight_v":
        A__ : Any =value
    elif weight_type == "bias":
        A__ : Tuple =value
    elif weight_type == "running_mean":
        A__ : Dict =value
    elif weight_type == "running_var":
        A__ : List[str] =value
    elif weight_type == "num_batches_tracked":
        A__ : Dict =value
    else:
        A__ : Optional[int] =value
    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowercase ( name : str , ignore_keys : list ) -> bool:
    """Return ``True`` if state-dict key ``name`` matches any pattern in ``ignore_keys``.

    Three pattern forms are supported:
      * ``"prefix.*"``  — matches any key starting with ``"prefix."``
      * ``"a.*.b"``     — matches keys containing both ``a`` and ``b``
      * plain substring — matches any key containing it

    Fixes vs. previous revision: both parameters were named ``UpperCamelCase``
    (a SyntaxError) while the body read the undefined names ``name`` and
    ``ignore_keys``; the signature now binds those names.
    """
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ):
    """Copy every tensor from a fairseq state dict into the HF SpeechT5 model,
    selecting the rename mapping and ignore list by ``task`` (s2t/t2s/s2s).

    NOTE(review): obfuscation damage — all three parameters share one name (a
    SyntaxError; presumably (fairseq_dict, hf_model, task)) and assignment
    targets were collapsed to ``A__``. The body also calls ``should_ignore`` /
    ``load_conv_layer`` / ``set_recursively``, which are not defined under those
    names in this file. Comments describe the presumed intent.
    """
    # Accumulator for keys that match nothing (presumably `unused_weights`).
    A__ : Tuple =[]
    # Select per-task feature encoder, key mapping, and ignore list.
    if task == "s2t":
        A__ : Dict =hf_model.speechta.encoder.prenet.feature_encoder
        A__ : int =MAPPING_S2T
        A__ : List[Any] =IGNORE_KEYS_S2T
    elif task == "t2s":
        A__ : Union[str, Any] =None
        A__ : List[Any] =MAPPING_T2S
        A__ : Tuple =IGNORE_KEYS_T2S
    elif task == "s2s":
        A__ : Optional[Any] =hf_model.speechta.encoder.prenet.feature_encoder
        A__ : Tuple =MAPPING_S2S
        A__ : Any =IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(UpperCamelCase , UpperCamelCase ):
            logger.info(F'''{name} was ignored''' )
            continue
        A__ : Optional[Any] =False  # presumably `is_used`
        if "conv_layers" in name:
            # Conv feature-extractor weights get dedicated handling.
            load_conv_layer(
                UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
            A__ : List[Any] =True
        else:
            # NOTE(review): iterates the global MAPPING, not the per-task
            # mapping selected above — looks wrong; confirm against upstream.
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    A__ , A__ : Dict =key.split(".*." )
                    if prefix in name and suffix in name:
                        A__ : int =suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    A__ : List[Any] =True
                    if "*" in mapped_key:
                        # Splice the fairseq layer index into the wildcard slot.
                        A__ : Optional[int] =name.split(UpperCamelCase )[0].split("." )[-2]
                        A__ : int =mapped_key.replace("*" , UpperCamelCase )
                    # Classify which parameter slot the tensor belongs to.
                    if "weight_g" in name:
                        A__ : str ="weight_g"
                    elif "weight_v" in name:
                        A__ : Optional[Any] ="weight_v"
                    elif "bias" in name:
                        A__ : Any ="bias"
                    elif "weight" in name:
                        A__ : Optional[int] ="weight"
                    elif "running_mean" in name:
                        A__ : Tuple ="running_mean"
                    elif "running_var" in name:
                        A__ : Optional[int] ="running_var"
                    elif "num_batches_tracked" in name:
                        A__ : str ="num_batches_tracked"
                    else:
                        A__ : List[Any] =None
                    set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
                continue
        if not is_used:
            unused_weights.append(UpperCamelCase )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict ):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature
    extractor, dispatching on (layer_id, type_id) parsed from the key.

    NOTE(review): obfuscation damage — all five parameters share one name (a
    SyntaxError; presumably (full_name, value, feature_extractor,
    unused_weights, use_group_norm)) and assignment targets were collapsed to
    ``A__`` (presumably name / items / layer_id / type_id and the `.data`
    destinations). Confirm against the upstream conversion script.
    """
    # Key layout after "conv_layers." is "<layer_id>.<type_id>.<param>".
    A__ : Any =full_name.split("conv_layers." )[-1]
    A__ : Dict =name.split("." )
    A__ : int =int(items[0] )
    A__ : str =int(items[1] )
    # type_id 0 -> conv weight/bias.
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            A__ : Optional[Any] =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            A__ : Optional[int] =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    # type_id 2 -> layer norm (group norm only applies to layer 0).
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            A__ : Any =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            A__ : Any =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(UpperCamelCase )
@torch.no_grad()
def lowercase ( UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , ):
    """End-to-end conversion: build the SpeechT5 config/model/tokenizer for the
    requested task, load fairseq weights, save locally and optionally push to
    the Hub.

    NOTE(review): obfuscation damage — all six parameters share one name (a
    SyntaxError; presumably (task, checkpoint_path, pytorch_dump_folder_path,
    config_path, vocab_path, repo_id)) and assignment targets were collapsed to
    ``A__`` (config, model, tokenizer, processor, fairseq_checkpoint, ...).
    """
    if config_path is not None:
        A__ : Any =SpeechTaConfig.from_pretrained(UpperCamelCase )
    else:
        A__ : Any =SpeechTaConfig()
    # Pick the task-specific head and positional limits.
    if task == "s2t":
        A__ : Union[str, Any] =config.max_text_positions
        A__ : Dict =SpeechTaForSpeechToText(UpperCamelCase )
    elif task == "t2s":
        # Magic numbers: presumably vocab_size=1876 and max_text_positions=600
        # for the TTS checkpoints — TODO confirm against upstream.
        A__ : str =1876
        A__ : Optional[int] =600
        A__ : Tuple =config.max_speech_positions
        A__ : Optional[Any] =SpeechTaForTextToSpeech(UpperCamelCase )
    elif task == "s2s":
        A__ : str =1876
        A__ : Tuple =config.max_speech_positions
        A__ : Any =SpeechTaForSpeechToSpeech(UpperCamelCase )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    if vocab_path:
        A__ : str =SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        A__ : Optional[Any] =AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase )
        A__ : int =mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    A__ : Dict =SpeechTaFeatureExtractor()
    A__ : Tuple =SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase )
    processor.save_pretrained(UpperCamelCase )
    # Load the raw fairseq checkpoint and transfer its "model" state dict.
    A__ : Union[str, Any] =torch.load(UpperCamelCase )
    recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase , UpperCamelCase )
    model.save_pretrained(UpperCamelCase )
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(UpperCamelCase )
        model.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A : str = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 656 | 0 |
'''simple docstring'''
import os
lowerCAmelCase : List[str] = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 1_00, 'D': 5_00, 'M': 10_00}
def A_( A : str) -> int:
    """Parse a Roman numeral string into its integer value.

    Uses the standard subtractive rule: a symbol smaller than its successor
    is subtracted (e.g. ``IV`` -> 4), otherwise added.

    Improvements vs. previous revision: the symbol table is embedded locally
    instead of read from a mutable module-level global, and an empty string
    now returns 0 instead of raising IndexError.
    """
    symbols = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total_value = 0
    for index, numeral in enumerate(A):
        current_value = symbols[numeral]
        # Subtractive notation: smaller value before a larger one is negative.
        if index + 1 < len(A) and current_value < symbols[A[index + 1]]:
            total_value -= current_value
        else:
            total_value += current_value
    return total_value
def A_( A : int) -> str:
    """Render a non-negative integer as a minimal Roman numeral string.

    Greedy descent over the value/symbol table, including the subtractive
    pairs (CM, CD, XC, XL, IX, IV); returns ``""`` for 0.

    Fixes vs. previous revision: the body read the unbound names ``num`` and
    ``numerals`` (the parameter had been renamed to ``A`` without updating the
    body), so every call raised NameError.
    """
    pairs = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    )
    num = A
    parts = []
    for value, symbol in pairs:
        count, num = divmod(num, value)
        parts.append(symbol * count)
    return "".join(parts)
def A_( A : str = "/p089_roman.txt"):
UpperCamelCase = 0
with open(os.path.dirname(A) + roman_numerals_filename) as filea:
UpperCamelCase = filea.readlines()
for line in lines:
UpperCamelCase = line.strip()
UpperCamelCase = parse_roman_numerals(A)
UpperCamelCase = generate_roman_numerals(A)
savings += len(A) - len(A)
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 3 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
    """GPT-2 language model conditioned on an encoded "prefix" embedding
    (CLIP-caption style): prefix features are projected into the GPT-2
    embedding space, prepended to the token embeddings, and decoded with
    beam search.

    NOTE(review): obfuscation damage throughout — assignment targets were
    collapsed to ``A__`` (later reads reveal the intended names), dtypes like
    ``torch.intaa`` are corrupted (presumably int64), and every parameter of
    the generation methods shares the name ``UpperCamelCase__``. Comments
    describe the presumed intent; confirm against the upstream UnCLIP text
    decoder before relying on them.
    """

    # Parameter name patterns excluded from checkpoint saving (attn masks/biases).
    __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]

    @register_to_config
    def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
        # Build the prefix projection layers and the underlying GPT-2 LM.
        super().__init__()
        A__ : Dict =prefix_length
        # A hidden projection is required whenever the prefix feature width
        # differs from the GPT-2 embedding width.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )
        A__ : Optional[int] =prefix_inner_dim
        A__ : Optional[int] =prefix_hidden_dim
        # encode_prefix: feature space -> hidden space (identity if no hidden dim).
        A__ : Optional[int] =(
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        # decode_prefix: hidden space -> GPT-2 embedding space.
        A__ : Optional[int] =(
            nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : str =GPTaConfig(
            vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ , activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , )
        A__ : Any =GPTaLMHeadModel(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
        """Forward pass: prepend projected prefix embeddings to the token
        embeddings and run GPT-2 (with dummy-token-padded labels if given)."""
        A__ : int =self.transformer.transformer.wte(UpperCamelCase__ )
        A__ : Tuple =self.encode_prefix(UpperCamelCase__ )
        A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ )
        A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            # Pad labels with zeros over the prefix positions.
            A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ):
        # Zero label placeholder covering the prefix positions.
        # NOTE(review): `torch.intaa` is corrupted (presumably torch.int64).
        return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
        # Project raw prefix features into the hidden space.
        return self.encode_prefix(UpperCamelCase__ )

    @torch.no_grad()
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
        """Generate captions one feature at a time via beam search; returns
        stacked token ids and per-sequence lengths."""
        A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 )
        A__ : List[str] =[]
        A__ : Dict =[]
        for feature in features:
            A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Optional[Any] =self.generate_beam(
                input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : Optional[Any] =torch.stack(UpperCamelCase__ )
        A__ : Optional[int] =torch.stack(UpperCamelCase__ )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ):
        """Beam search over GPT-2 logits (beam size presumably 5, max length 67),
        tracking per-beam cumulative scores and stop flags."""
        A__ : str =eos_token_id
        A__ : Optional[Any] =None
        A__ : int =None
        # Per-beam generated length and "has emitted EOS" flags.
        A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int )
        A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Union[str, Any] =input_embeds
        else:
            A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ )
        for i in range(UpperCamelCase__ ):
            A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ )
            A__ : Tuple =outputs.logits
            # Temperature-scaled log-probabilities of the last position.
            A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Optional[Any] =logits.softmax(-1 ).log()
            if scores is None:
                # First step: expand a single hypothesis into `beam_size` beams.
                A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 )
                A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] )
                A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : str =next_tokens
                else:
                    A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] )
                    A__ : str =torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Later steps: pick the best (beam, token) pairs by length-
                # normalized cumulative score; stopped beams contribute nothing.
                A__ : Union[str, Any] =-float(np.inf )
                A__ : Dict =0
                A__ : Optional[Any] =scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Optional[Any] =scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 )
                A__ : Tuple =next_tokens // scores_sum.shape[1]
                A__ : List[Any] =seq_lengths[next_tokens_source]
                A__ : int =next_tokens % scores_sum.shape[1]
                A__ : str =next_tokens.unsqueeze(1 )
                A__ : List[Any] =tokens[next_tokens_source]
                A__ : int =torch.cat((tokens, next_tokens) , dim=1 )
                A__ : List[str] =generated[next_tokens_source]
                A__ : Optional[Any] =scores_sum_average * seq_lengths
                A__ : Optional[int] =is_stopped[next_tokens_source]
            # Append the chosen token's embedding and update stop flags.
            A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : str =torch.cat((generated, next_token_embed) , dim=1 )
            A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze()
            if is_stopped.all():
                break
        # Rank beams by length-normalized score, best first.
        A__ : Optional[int] =scores / seq_lengths
        A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ )
        # tokens tensors are already padded to max_seq_length
        A__ : int =[tokens[i] for i in order]
        A__ : Any =torch.stack(UpperCamelCase__ , dim=0 )
        A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 656 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__UpperCamelCase : Any = logging.get_logger(__name__)
class a ( a__ ):
    """Speech2Text-style feature extractor: computes Kaldi-compatible log-mel
    filter-bank features via torchaudio and applies utterance-level cepstral
    mean/variance normalization (CMVN).

    NOTE(review): obfuscation damage — assignment targets were collapsed to
    ``lowerCAmelCase``; in ``__init__`` the intended targets are ``self.*``
    attributes (num_mel_bins, do_ceptral_normalize, normalize_means,
    normalize_vars, return_attention_mask), read back later in the class.
    In ``__call__`` most parameters share the name ``_snake_case`` while the
    body reads the intended names (raw_speech, padding, max_length, ...).
    """

    snake_case__ = ['''input_features''', '''attention_mask''']

    def __init__( self , _snake_case=80 , _snake_case=1_60_00 , _snake_case=80 , _snake_case=0.0 , _snake_case=True , _snake_case=True , _snake_case=True , **_snake_case , ):
        """Configure mel-bin count, sampling rate, padding value and CMVN flags."""
        super().__init__(feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , **_snake_case )
        lowerCAmelCase = num_mel_bins
        lowerCAmelCase = do_ceptral_normalize
        lowerCAmelCase = normalize_means
        lowerCAmelCase = normalize_vars
        lowerCAmelCase = True
    def UpperCamelCase__ ( self , _snake_case , ):
        """Compute Kaldi fbank features for one waveform (numpy -> numpy)."""
        # Kaldi compliance: 16-bit signed integers — scale float waveform up.
        lowerCAmelCase = waveform * (2**15)
        lowerCAmelCase = torch.from_numpy(_snake_case ).unsqueeze(0 )
        lowerCAmelCase = ta_kaldi.fbank(_snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
    @staticmethod
    def UpperCamelCase__ ( _snake_case , _snake_case , _snake_case = True , _snake_case = True , _snake_case = 0.0 , ):
        """Utterance-level CMVN over the first `input_length` frames; padded
        frames are reset to `padding_value`."""
        # Statistics are computed only over real (non-padded) frames.
        if normalize_means:
            lowerCAmelCase = x[:input_length].mean(axis=0 )
            lowerCAmelCase = np.subtract(_snake_case , _snake_case )
        if normalize_vars:
            lowerCAmelCase = x[:input_length].std(axis=0 )
            lowerCAmelCase = np.divide(_snake_case , _snake_case )
        if input_length < x.shape[0]:
            lowerCAmelCase = padding_value
        # make sure array is in float32
        lowerCAmelCase = x.astype(np.floataa )
        return x
    def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
        """Apply per-utterance CMVN; real lengths come from the attention mask."""
        lowerCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(_snake_case , _snake_case , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(_snake_case , _snake_case )
        ]
    def __call__( self , _snake_case , _snake_case = False , _snake_case = None , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ):
        """Featurize raw speech: validate the sampling rate, batch the input,
        extract fbank features, pad, normalize, and optionally tensorize."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    F' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        # Normalize the many accepted input shapes down to a list of 1-D arrays.
        lowerCAmelCase = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        lowerCAmelCase = is_batched_numpy or (
            isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(_snake_case , np.ndarray ):
            lowerCAmelCase = np.asarray(_snake_case , dtype=np.floataa )
        elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase = [raw_speech]
        # extract fbank features
        lowerCAmelCase = [self._extract_fbank_features(_snake_case ) for waveform in raw_speech]
        # convert into correct format for padding
        lowerCAmelCase = BatchFeature({'input_features': features} )
        lowerCAmelCase = self.pad(
            _snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
        # make sure list is in array format
        lowerCAmelCase = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , _snake_case ):
            lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_features]
        lowerCAmelCase = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            lowerCAmelCase = [np.asarray(_snake_case , dtype=np.intaa ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            lowerCAmelCase = (
                np.array(_snake_case , dtype=np.intaa )
                if self._get_padding_strategies(_snake_case , max_length=_snake_case ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            lowerCAmelCase = self.normalize(
                padded_inputs['input_features'] , attention_mask=_snake_case )
        if return_tensors is not None:
            lowerCAmelCase = padded_inputs.convert_to_tensors(_snake_case )
        return padded_inputs
| 4 | """simple docstring"""
import os
def lowercase ():
    """Project Euler 18/67: maximum top-to-bottom path sum in ``triangle.txt``.

    Reads the triangle file that sits next to this script, then runs the
    classic bottom-up dynamic program: each cell accumulates the larger of
    its two parents, so the answer is the maximum of the last row.

    Fixes vs. previous revision: the body read the undefined name
    ``UpperCamelCase`` in a zero-argument function (should be ``__file__``),
    and the ``__main__`` guard called the undefined ``solution()``.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()

    # Parse each line into a row of ints.
    a = []
    for line in triangle:
        a.append([int(number) for number in line.strip().split(" ")])

    # DP: add the best reachable parent to every cell, row by row.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # Out-of-range parents (triangle edges) contribute 0.
            parent_right = a[i - 1][j] if j != len(a[i - 1]) else 0
            parent_left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(parent_right, parent_left)
    return max(a[-1])


if __name__ == "__main__":
    print(lowercase())
| 656 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants below were obfuscated to the same name
# ``_lowercase`` (presumably `logger` and `SEW_PRETRAINED_CONFIG_ARCHIVE_MAP`),
# so the second assignment shadows the first.
_lowercase = logging.get_logger(__name__)

# Canonical pretrained-config URLs for SEW checkpoints.
_lowercase = {
    """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a SEW (Squeezed and Efficient Wav2vec) model.

    The previous signature bound every parameter to one name (a SyntaxError:
    duplicate argument names) and the base class referenced an undefined
    alias. Parameters are restored to the names the body assigns, in the same
    positional order, so positional callers are unaffected; the base is the
    ``PretrainedConfig`` imported at the top of the file.
    """

    # model_type was previously clobbered by the property at the bottom of
    # the class, which reused the same attribute name.
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy to lists so later mutation of the caller's sequences cannot
        # change the config.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f" = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Overall stride of the feature extractor (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 5 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Must be named `logger`: the conversion function below calls `logger.info(...)`.
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Map original GLPN checkpoint keys onto the HF GLPN naming scheme.

    Named ``rename_keys`` because that is how the conversion routine below
    calls it. Returns a new OrderedDict; the input mapping is not modified.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") projection into separate entries.

    The original checkpoint stores the key and value projections of every
    attention layer as one matrix; HF GLPN expects separate ``key`` and
    ``value`` weights/biases. The previous body popped the fused tensors but
    discarded them (assigned to throwaway locals) — they are now written back
    as the two halves, as the in-code comment always intended.
    Mutates `state_dict` in place.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO cats test image used to verify conversions.

    Named ``prepare_img`` because that is how the conversion routine below
    calls it. ``stream=True`` (previously an undefined name) lets PIL read
    straight from the response body.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint into a HF ``GLPNForDepthEstimation``.

    Loads the original weights, renames/splits them via ``rename_keys`` and
    ``read_in_k_v``, sanity-checks the model output against known slices for
    the released nyu/kitti checkpoints, and optionally pushes the model and
    image processor to the hub. Named to match the ``__main__`` call site.
    """
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output against known top-left slices of the released checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # Bind the parser and parsed args to the names the code below reads
    # (they were previously assigned to a throwaway name, so every
    # `parser.add_argument` / `args.*` access raised NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 656 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between rows of `a` and `b`.

    Named ``squared_euclidean_distance`` because the quantizer below calls it
    by that name. Uses the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    to avoid materialising an (n, m, d) broadcast.

    Args:
        a: array of shape (n, d).
        b: array of shape (m, d).

    Returns:
        (n, m) array of squared distances.
    """
    b = b.T
    a_sq = np.sum(np.square(a), axis=1)
    b_sq = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a_sq[:, None] - 2 * ab + b_sq[None, :]
    return d
def color_quantize(x, clusters):
    """Assign each RGB pixel in `x` (..., 3) to its nearest cluster index.

    Named ``color_quantize`` because the image processor below calls it by
    that name (the previous definition shared a name with the distance helper
    above, so only one of them survived).
    """
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class UpperCamelCase_(BaseImageProcessor):
    r"""Image processor for ImageGPT.

    Optionally resizes, scales pixel values to [-1, 1], and color-quantizes
    pixels to cluster indices ("color tokens"). Methods are restored to the
    ``BaseImageProcessor`` override names (``resize``/``normalize``/
    ``preprocess``); the previous class bound all three to one name, so only
    the last definition survived, and its __init__ reused one parameter name
    for every argument (a SyntaxError).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        """Store preprocessing defaults; `clusters` are the (k, 3) color codebook."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        # Keep clusters as an ndarray (or None) so nearest-cluster lookup is cheap.
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # Bare `resize` resolves to the module-level helper imported above.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Scale `image` from [0, 255] into [-1, 1] (ImageGPT's expected range)."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch: resize, normalize, then (optionally) quantize
        each pixel to its nearest cluster and flatten to "input_ids".

        Per-call arguments override the defaults stored on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # Only convert when provided, so the "missing clusters" check below can
        # fire (np.array(None) would otherwise be a non-None 0-d array).
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized: the original `a and b or c` bound as `(a and b) or c`,
        # which could raise even when do_resize is False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
# Module logger (previously clobbered: the archive map below shared its name).
logger = logging.get_logger(__name__)

# Map of pretrained GPT-Neo checkpoints to their hosted config files.
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for a GPT-Neo model.

    The previous signature bound every parameter to one name (a SyntaxError:
    duplicate argument names) and the three class attributes all shared a
    name, clobbering each other. Names are restored per the values the body
    assigns, in the same positional order, and the base class is the
    ``PretrainedConfig`` imported at the top of the file.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        # NOTE: a shared mutable default is safe here only because it is never mutated.
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand e.g. [[["global", "local"], 12]] into a flat per-layer list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom ``torch.Tensor.unfold`` implementation (ONNX-export friendly).

    Renamed: the previous definition shared a name with the function below,
    which shadowed it, and reused one name for all four parameters (a
    SyntaxError). Extracts sliding windows of `size` every `step` along
    `dimension`, returning them as a trailing window axis.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    # Each row of `indices` is one window's element positions.
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    # Move the window axis to the end, matching torch.Tensor.unfold.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Largest divisor of `seq_length` below `window_size`, and the block count.

    Renamed to be distinct from the unfold helper above, and the duplicated
    parameter names (a SyntaxError) restored per the body's reads.

    Returns:
        (largest_divisor, seq_length // largest_divisor) as 0-d tensors.
    """
    import torch

    # Every candidate block length strictly below the window size.
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class __lowerCAmelCase(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo.

    Members restored to the ``OnnxConfigWithPast`` override names; the
    previous class bound every property/method to one name, so only the last
    definition survived and none of the base-class hooks were overridden.
    The base is the ``OnnxConfigWithPast`` imported at the top of the file.
    """

    @property
    def inputs(self):
        """Dynamic-axis spec for the exported model's inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self):
        """GPT-Neo stores the head count as `num_heads`."""
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Build dummy inputs for tracing, adding past_key_values when enabled."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the (dummy) past positions as well.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 656 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Any ) -> Dict:
'''simple docstring'''
_A = a.name
_A = b.name
_A = ''
_A = ''
_A = a == b
_A = name_a
_A = name_b
return res
def _snake_case ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : List[Any] ) -> Tuple:
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_snake_case , _snake_case )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case )
_graph_replace_input_with(node_proto.attribute[1].g , _snake_case , _snake_case )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case )
def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply ``_node_replace_input_with`` to every node of an ONNX graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and rewire their consumers to the kept copy.

    Args:
        model: reference model whose initializer list still has every entry.
        model_without_ext: model to mutate (duplicates removed, inputs rewired).
        ind_to_replace: (dup_index, kept_index) pairs into the initializer list.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # Delete the duplicate tensor, then point every consumer at the kept
        # one. (This call was previously outside the loop with garbled args,
        # so no rewiring ever happened.)
        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializers in an ONNX file.

    Scans every pair of initializers, records duplicates and the memory they
    waste, rewires the graph to the surviving copies, and saves an
    ``optimized_<name>`` copy next to the input file.

    Args:
        onnx_file_path: path to the ONNX model file.

    Returns:
        str: path of the optimized model file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                # Track both indices so neither is compared again.
                # (Previously the *file path* was added here instead.)
                dup_set.add(i)
                dup_set.add(j)

                # Estimate the bytes saved by dropping the duplicate.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / float64
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
| 7 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (previously clobbered: the archive map below shared its name).
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for a MEGATRON-BERT model.

    The previous signature bound every parameter to one name (a SyntaxError:
    duplicate argument names) and inherited from an undefined alias. Names
    are restored per the values the body assigns, in the same positional
    order; the base is the ``PretrainedConfig`` imported at the top.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 656 | 0 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE:
    """Fast polynomial multiplication using an iterative FFT.

    ``product`` holds the coefficients of polyA * polyB (low degree first),
    computed eagerly in the constructor.

    Fixes over the previous revision: the two private methods shared one
    name, so ``self.__dft`` / ``self.__multiply`` raised AttributeError;
    ``len_A``/``len_B`` were never stored although ``__str__`` reads them;
    several loop locals (``root``, ``current_root``, ``dft``, ...) were
    assigned under throwaway names but read under these names; and the
    third-party mpmath root of unity is replaced by the stdlib-equivalent
    ``cmath.exp(2*pi*i / n)``.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Copy inputs so callers' lists are never mutated.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Strip trailing zero coefficients.
        # NOTE(review): an all-zero polynomial still raises IndexError here,
        # matching the upstream algorithm's behavior.
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad with zeros up to the next power of two >= deg(A*B) + 1.
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # Primitive c_max_length-th root of unity, exp(2*pi*i / n) in double
        # precision (what complex(mpmath.root(x=1, n=n, k=1)) evaluated to).
        import cmath

        self.root = cmath.exp(2j * cmath.pi / self.c_max_length)

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Iterative decimation DFT of polyA (which == 'A') or polyB."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs, then invert the transform."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack, rounding away FFT float noise.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable dump of both operands and their product.

        (The enumerate unpack previously had index and coefficient swapped,
        printing "index*x^coefficient".)
        """
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
    # Run the module's doctests when executed directly. (A stray
    # concatenation artifact previously OR-ed the testmod() result with junk.)
    import doctest

    doctest.testmod()
from __future__ import annotations
def lowercase(nums: list[float]) -> bool:
    """Return True if side lengths `nums` can form a polygon.

    A polygon exists iff the longest side is strictly shorter than the sum of
    all the others. The input list is not modified. The parameter is named
    ``nums`` because the body reads it by that name (the signature had been
    garbled, raising NameError on every call).

    Raises:
        ValueError: if fewer than two sides are given, or any side is <= 0.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod()
| 656 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
    """Build the argument parser for `accelerate tpu-config`.

    Named per the ``main()`` call site below. When `subparsers` is given (the
    accelerate CLI dispatcher), attach as the `tpu-config` subcommand;
    otherwise build a standalone parser. The parameter is named
    ``subparsers`` because the body reads it by that name.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        # Dispatch target for the subcommand (the garbled original passed the
        # subparsers object itself here).
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def A ( __UpperCamelCase ) -> Optional[Any]:
    """Launch the commands parsed by the ``tpu-config`` parser on a TPU pod
    via ``gcloud compute tpus tpu-vm ssh``.

    NOTE(review): many locals in this body appear to have been mangled — every
    assignment goes to `A__`, while later lines read `args`, `defaults`,
    `new_cmd` and `cmd`, none of which is ever bound here. As written the
    function raises NameError on the first `args.` access. Presumably
    `__UpperCamelCase` is the parsed-args namespace and `A__` was originally
    `args.<attr>` / `defaults` / `new_cmd` / `cmd` — confirm against upstream.
    """
    A__ = None
    # Get the default from the config file if it exists.
    # NOTE(review): `os.path.isfile` is handed the whole namespace object, not a
    # path — upstream passes a `default_config_file` constant; verify.
    if args.config_file is not None or os.path.isfile(__UpperCamelCase ):
        A__ = load_config_from_file(args.config_file )
    # Fill in unset CLI options from the loaded config defaults.
    if not args.command_file and defaults.command_file is not None and not args.command:
        A__ = defaults.command_file
    if not args.command and defaults.commands is not None:
        A__ = defaults.commands
    if not args.tpu_name:
        A__ = defaults.tpu_name
    if not args.tpu_zone:
        A__ = defaults.tpu_zone
    # Translate the requested accelerate version into a pip spec:
    # "dev" -> GitHub HEAD, "latest" -> upgrade from PyPI, else pin the version.
    if args.accelerate_version == "dev":
        A__ = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        A__ = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , __UpperCamelCase ):
        A__ = f'''accelerate=={args.accelerate_version}'''
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            A__ = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , __UpperCamelCase ):
        A__ = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    A__ = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f'''pip install {args.accelerate_version}''']
    new_cmd += args.command
    # NOTE(review): the joined command string is assigned to `A__` and never
    # used — upstream stores it back into `args.command`; verify.
    A__ = '; '.join(__UpperCamelCase )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    A__ = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        # Dry run: show the gcloud invocation instead of executing it.
        print(f'''Running {" ".join(__UpperCamelCase )}''' )
        return
    subprocess.run(__UpperCamelCase )
    print('Successfully setup pod.' )
def A ( ) -> Optional[Any]:
    """Standalone CLI entry point: build the tpu-config parser, parse argv and
    hand the result to the launcher.

    Fix: the original assigned the parser to `A__` and then called
    ``parser.parse_args()`` on an unbound name `parser` (NameError).
    """
    # NOTE(review): `tpu_command_parser` / `tpu_command_launcher` are not bound
    # under these names in the visible file (the definitions above are all
    # named `A`) — confirm the real module exposes them.
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 9 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure for the MEGA sub-package: the configuration module is
# always registered; the torch modeling module is registered only when torch is
# available. Under TYPE_CHECKING the symbols are imported eagerly so static
# type checkers can resolve them; at runtime the module is replaced by a
# _LazyModule proxy that imports on first attribute access.
#
# Fix: the dict/list were previously assigned to a throwaway name `__A`, so
# `_import_structure` (used by _LazyModule below) was never defined, the
# modeling names were never registered, and the proxy was never installed.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply skip registration of the modeling module.
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowerCAmelCase_ ( __lowercase ):
    # Seq2Seq-specific training arguments layered on top of TrainingArguments.
    # NOTE(review): upstream `Seq2SeqTrainingArguments` declares five distinct
    # fields (sortish_sampler, predict_with_generate, generation_max_length,
    # generation_num_beams, generation_config); here they all share the single
    # name `UpperCAmelCase`, so only the last assignment survives — confirm.
    UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to use SortishSampler or not."} )
    UpperCAmelCase = field(
        default=__lowercase, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    UpperCAmelCase = field(
        default=__lowercase, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    UpperCAmelCase = field(
        default=__lowercase, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    UpperCAmelCase = field(
        default=__lowercase, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )

    def UpperCamelCase_ ( self : str ):
        """Serialize the arguments to a plain JSON-friendly dict.

        NOTE(review): `d` below is never bound here (the `super().to_dict()`
        result goes to `_UpperCamelCase`) and `isinstance(_A, _A)` compares a
        name against itself — upstream iterates `d = super().to_dict()` and
        converts `GenerationConfig` values via `v.to_dict()`; verify.
        """
        _UpperCamelCase = super().to_dict()
        for k, v in d.items():
            if isinstance(_A , _A ):
                _UpperCamelCase = v.to_dict()
        return d
| 10 | """simple docstring"""
def lowercase ( UpperCamelCase : int ):
    """Return all prime numbers <= ``UpperCamelCase`` using the Sieve of
    Eratosthenes.

    Args:
        UpperCamelCase: Upper bound (inclusive); must be a positive integer.

    Returns:
        List of primes in ascending order.

    Raises:
        ValueError: If the input is not positive.

    Fixes: the body previously read an unbound name ``num`` (the parameter had
    been renamed) and used the *bound* instead of the current prime ``p`` as
    the marking step, which is both a NameError and wrong sieve logic.
    """
    num = UpperCamelCase  # keep the public parameter name; use a readable local
    if num <= 0:
        raise ValueError("Input must be a positive integer" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark multiples of p, starting at p*p (smaller multiples were
            # already marked by smaller primes).
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the sieve above is bound to the name `lowercase`, and the parsed
    # input previously went to a throwaway `__A` while the call read an
    # unbound `user_num` — both were NameErrors.
    user_num = int(input("Enter a positive integer: ").strip())
    print(lowercase(user_num))
| 656 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __A ( A ):
    """Configuration class for the UMT5 model (T5 architecture variant).

    Stores encoder/decoder sizes, attention geometry and the feed-forward
    activation layout. Fixes: the original ``__init__`` declared every
    parameter as ``A`` (duplicate parameter names are a SyntaxError) and lost
    all attribute names to ``_a``; the canonical UMT5Config names are restored.
    """

    # NOTE(review): both class attributes share one name so only the second
    # assignment survives; upstream these are `model_type = "umt5"` and
    # `keys_to_ignore_at_inference = ["past_key_values"]` — confirm.
    __lowerCamelCase : Tuple = 'umt5'
    __lowerCamelCase : Optional[int] = ['past_key_values']

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1E-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ) -> Any:
        """Create a UMT5 configuration; unknown kwargs go to the base config."""
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Decoder depth defaults to the encoder depth (symmetric model).
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" -> gated variant with gelu activation; plain "relu" etc.
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        if feed_forward_proj == "gated-gelu":
            # Upstream maps gated-gelu to the "gelu_new" kernel.
            self.dense_act_fn = '''gelu_new'''

    # NOTE(review): the three properties below all share the name `a__`, so
    # only the last definition is bound; upstream these are `hidden_size`,
    # `num_attention_heads` and `num_hidden_layers` — kept as-is to preserve
    # the visible interface, confirm against upstream.
    @property
    def a__ (self ) -> Any:
        """Alias for ``d_model``."""
        return self.d_model

    @property
    def a__ (self ) -> Tuple:
        """Alias for ``num_heads``."""
        return self.num_heads

    @property
    def a__ (self ) -> List[Any]:
        """Alias for ``num_layers``."""
        return self.num_layers
class __A ( A ):
    """ONNX export configuration for UMT5 (seq2seq-with-past variant).

    Fixes: the ``inputs`` property previously assigned every value to a
    throwaway ``_a`` and then returned the unbound name ``common_inputs``
    (NameError); the canonical T5OnnxConfig.inputs logic is restored.
    """

    # NOTE(review): the three properties below all share the name `a__`, so
    # only the last definition is bound; upstream these are `inputs`,
    # `default_onnx_opset` and `atol_for_validation` — confirm.
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def a__ (self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for each exported input tensor."""
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            # With cached key/values the mask spans past + current tokens and
            # the decoder consumes a single new token per step.
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def a__ (self ) -> int:
        """Minimum ONNX opset required for export."""
        return 13

    @property
    def a__ (self ) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 5E-4
| 11 | """simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase):
    """CPU-only regression test: an optimizer wrapped by ``Accelerator.prepare``
    must survive a pickle round-trip (needed e.g. for checkpointing)."""

    def _UpperCAmelCase ( self : List[Any] ):
        """Prepare an SGD optimizer through Accelerate and pickle it.

        Fix: the original called ``accelerator.prepare(UpperCamelCase__)`` and
        ``pickle.dumps(UpperCamelCase__)`` on a name that is not a parameter of
        this method (NameError); the optimizer is now threaded through.
        """
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # Reset the global accelerator singleton so later tests start clean.
        AcceleratorState._reset_state()
| 656 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# MT5 shares T5's tokenizer: import the (slow/fast) T5 tokenizer — or its dummy
# placeholder when the backend is unavailable — and re-export it under the MT5
# names. Fixes: the aliases and `_import_structure` were previously assigned to
# throwaway `lowerCamelCase__` names, so `MTaTokenizer`, `MTaTokenizerFast` and
# `_import_structure` (all used below) were never defined, the modeling symbol
# lists were never registered, and the lazy module was never installed.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

# Lazy import structure: configuration eagerly, heavy modeling modules on demand.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    # Install the lazy proxy; the tokenizer aliases stay eagerly available via
    # `extra_objects`.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["""__file__"""],
        _import_structure,
        extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
        module_spec=__spec__,
    )
| 12 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Module-level constants for the BigBird fast tokenizer. Fixes: these were all
# assigned to throwaway `__A` names, while the tokenizer class below references
# them as `BigBirdTokenizer`, `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Without sentencepiece the slow tokenizer is unavailable.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

SPIECE_UNDERLINE = "▁"
class __lowerCAmelCase ( _UpperCamelCase):
    """Fast (Rust-tokenizers-backed) BigBird tokenizer.

    NOTE(review): all class attributes below share the single name
    `__magic_name__`, so only the last assignment survives; upstream these are
    vocab_files_names, pretrained_vocab_files_map, max_model_input_sizes,
    slow_tokenizer_class, model_input_names and prefix_tokens — confirm.
    """
    __magic_name__ : Dict = VOCAB_FILES_NAMES
    __magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : List[Any] = BigBirdTokenizer
    __magic_name__ : Any = ["""input_ids""", """attention_mask"""]
    __magic_name__ : List[int] = []

    # NOTE(review): every parameter below is named `UpperCamelCase__` — a
    # duplicate-parameter SyntaxError in Python; upstream order is
    # (vocab_file, tokenizer_file, unk_token, bos_token, eos_token, pad_token,
    # sep_token, mask_token, cls_token) — confirm before relying on this.
    def __init__( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : str="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : str="[CLS]" , **UpperCamelCase__ : List[Any] , ):
        # Wrap plain-string special tokens as AddedToken (no lstrip/rstrip),
        # leaving already-wrapped tokens untouched.
        A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
        A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
        A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
        A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
        A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
        A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token

        super().__init__(
            UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )

        A__ : List[Any] =vocab_file
        # True only when a slow (sentencepiece) vocab file is available to save.
        A__ : Optional[int] =False if not self.vocab_file else True

    # NOTE(review): the four methods below all share the name `_UpperCAmelCase`
    # (only the last is bound); upstream they are
    # build_inputs_with_special_tokens, get_special_tokens_mask,
    # create_token_type_ids_from_sequences and save_vocabulary — confirm.
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        A__ : Tuple =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        """Return a mask of 1s at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]

        if token_ids_a is None:
            return [1] + ([0] * len(UpperCamelCase__ )) + [1]
        return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """Return token-type ids: 0 for the first sequence, 1 for the second."""
        A__ : Tuple =[self.sep_token_id]
        A__ : Dict =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        """Copy the sentencepiece vocab file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : List[str] =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        return (out_vocab_file,)
| 656 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__lowerCamelCase : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__lowerCamelCase : str = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__lowerCamelCase : List[Any] = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_60_00,
'return_attention_mask': False,
'do_normalize': True,
}
__lowerCamelCase : List[str] = tempfile.mkdtemp()
__lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__lowerCamelCase : List[str] = 'hf-internal-testing/ngram-beam-search-decoder'
def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
__lowerCamelCase : Dict = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> str:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> str:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : str = self.get_tokenizer()
__lowerCamelCase : Dict = self.get_feature_extractor()
__lowerCamelCase : List[Any] = self.get_decoder()
__lowerCamelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowerCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Optional[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Dict = self.get_feature_extractor()
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : Any = self.get_decoder()
__lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = floats_list((3, 10_00) )
__lowerCamelCase : Union[str, Any] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__lowerCamelCase : Union[str, Any] = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Union[str, Any] = self.get_feature_extractor()
__lowerCamelCase : Optional[int] = self.get_tokenizer()
__lowerCamelCase : Optional[Any] = self.get_decoder()
__lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = 'This is a test string'
__lowerCamelCase : Any = processor(text=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=(2, 10, 16) , SCREAMING_SNAKE_CASE_=77 ) -> List[Any]:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : str = self.get_feature_extractor()
__lowerCamelCase : Union[str, Any] = self.get_tokenizer()
__lowerCamelCase : Optional[Any] = self.get_decoder()
__lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowerCamelCase : Optional[int] = processor.decode(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Optional[int] = self.get_feature_extractor()
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : List[Any] = self.get_decoder()
__lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowerCamelCase : List[str] = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__lowerCamelCase : str = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__lowerCamelCase : Dict = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : str = self.get_feature_extractor()
__lowerCamelCase : str = self.get_tokenizer()
__lowerCamelCase : Any = self.get_decoder()
__lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self._get_dummy_logits()
__lowerCamelCase : int = 15
__lowerCamelCase : Dict = -2_0.0
__lowerCamelCase : Optional[Any] = -4.0
__lowerCamelCase : List[Any] = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : List[Any] = decoded_processor_out.text
__lowerCamelCase : Optional[int] = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__lowerCamelCase : List[Any] = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
__lowerCamelCase : Tuple = [d[0][2] for d in decoded_decoder_out]
__lowerCamelCase : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : List[Any] = self.get_feature_extractor()
__lowerCamelCase : int = self.get_tokenizer()
__lowerCamelCase : int = self.get_decoder()
__lowerCamelCase : Dict = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = self._get_dummy_logits()
__lowerCamelCase : Union[str, Any] = 2.0
__lowerCamelCase : str = 5.0
__lowerCamelCase : int = -2_0.0
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : str = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Any = decoded_processor_out.text
__lowerCamelCase : int = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__lowerCamelCase : Union[str, Any] = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Tuple = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowerCamelCase : str = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowerCamelCase : Optional[int] = os.listdir(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Union[str, Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = processor.decoder.model_container[processor.decoder._model_key]
__lowerCamelCase : int = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowerCamelCase : str = os.listdir(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> str:
__lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Dict = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Tuple = floats_list((3, 10_00) )
__lowerCamelCase : Optional[Any] = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__lowerCamelCase : Optional[int] = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowerCamelCase : int = self._get_dummy_logits()
__lowerCamelCase : Tuple = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self ) -> int:
    """Assert the processor exposes the same model input names as its feature extractor."""
    __lowerCamelCase : Optional[int] = self.get_feature_extractor()
    __lowerCamelCase : List[str] = self.get_tokenizer()
    __lowerCamelCase : List[Any] = self.get_decoder()
    __lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
    self.assertListEqual(
        processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def lowercase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
__lowerCamelCase : int = [d[key] for d in offsets]
return retrieved_list
def lowercase_ ( self ) -> Tuple:
    """Check single-sample `decode(..., output_word_offsets=...)` output structure and offsets."""
    __lowerCamelCase : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
    __lowerCamelCase : Optional[Any] = self._get_dummy_logits()[0]
    __lowerCamelCase : Dict = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
    # check Wav2Vec2CTCTokenizerOutput keys for word
    self.assertEqual(len(outputs.keys() ) , 4 )
    self.assertTrue('text' in outputs )
    self.assertTrue('word_offsets' in outputs )
    self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
    # Joining the per-word offsets must reconstruct the decoded text.
    self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
    self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
    self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
    self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def lowercase_ ( self ) -> int:
    """Check batched `batch_decode(..., output_word_offsets=...)` output structure and offsets."""
    __lowerCamelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
    __lowerCamelCase : Union[str, Any] = self._get_dummy_logits()
    __lowerCamelCase : Tuple = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
    # check Wav2Vec2CTCTokenizerOutput keys for word
    self.assertEqual(len(outputs.keys() ) , 4 )
    self.assertTrue('text' in outputs )
    self.assertTrue('word_offsets' in outputs )
    self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
    # Each batch element's joined words must equal its decoded text.
    self.assertListEqual(
        [' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
    self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
    self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
    self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase_ ( self ) -> Union[str, Any]:
    """Slow integration test: decode a real Common Voice sample and check words and timestamps."""
    import torch

    # Stream the dataset so only the first sample is fetched.
    __lowerCamelCase : Optional[Any] = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
    __lowerCamelCase : Union[str, Any] = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_60_00 ) )
    __lowerCamelCase : List[str] = iter(SCREAMING_SNAKE_CASE_ )
    __lowerCamelCase : List[Any] = next(SCREAMING_SNAKE_CASE_ )
    __lowerCamelCase : Union[str, Any] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
    __lowerCamelCase : Tuple = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
    # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
    __lowerCamelCase : List[Any] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
    with torch.no_grad():
        __lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
    __lowerCamelCase : Optional[int] = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
    # Convert frame offsets to seconds using the model's frame-to-sample ratio.
    __lowerCamelCase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
    __lowerCamelCase : Optional[int] = [
        {
            'start_time': d['start_offset'] * time_offset,
            'end_time': d['end_offset'] * time_offset,
            'word': d['word'],
        }
        for d in output['word_offsets']
    ]
    __lowerCamelCase : Optional[Any] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
    # output words
    self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
    self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
    # output times
    __lowerCamelCase : str = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
    __lowerCamelCase : Tuple = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
    # fmt: off
    __lowerCamelCase : Any = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
    __lowerCamelCase : str = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
    # fmt: on
    # Timestamps must match the reference values within 10 ms.
    self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
    self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 13 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module-level logger and SentencePiece vocab-file metadata for the CPM tokenizer.
# NOTE(review): all three constants are assigned to the same name `__A` (renaming
# artifact) while the class below reads `logger` and `VOCAB_FILES_NAMES` — confirm
# the intended distinct names upstream.
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = {"vocab_file": "spiece.model"}
__A : List[Any] = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase ( _UpperCamelCase):
    """SentencePiece-based CPM tokenizer that pre-segments Chinese text with jieba.

    NOTE(review): throughout this class, assignment targets were renamed to `A__`
    by an automated pass while subsequent reads use the descriptive names
    (`sp_model_kwargs`, `outputs`, `pieces`, ...); the original bindings need to be
    restored upstream for the code to run.
    """

    # NOTE(review): `additional_special_tokens` uses a mutable list default —
    # shared across calls; upstream should use None + fallback.
    def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
        # Wrap the mask token so it strips the space before it (lstrip) when used.
        A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        A__ : Dict =3
        A__ : int =do_lower_case
        A__ : str =remove_space
        A__ : Optional[Any] =keep_accents
        A__ : int =vocab_file
        # Load the SentencePiece model from the vocab file.
        A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCamelCase__ )
        # jieba is an optional dependency required for Chinese word segmentation.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        A__ : Union[str, Any] =jieba
        # Map space -> U+2582 and newline -> U+2583 so they survive tokenization.
        A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def _UpperCAmelCase ( self : Union[str, Any] ):
        """Number of tokens in the SentencePiece vocabulary."""
        return len(self.sp_model )

    def _UpperCAmelCase ( self : Optional[int] ):
        """Return the full token->id vocabulary, including added tokens."""
        A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : List[str] ):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        A__ : Union[str, Any] =self.__dict__.copy()
        A__ : Tuple =None
        return state

    def __setstate__( self : Tuple , UpperCamelCase__ : int ):
        A__ : Union[str, Any] =d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            A__ : Optional[int] ={}
        A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ):
        """Normalize raw text: collapse whitespace, unify quotes, strip accents, lowercase."""
        if self.remove_space:
            A__ : Optional[int] =" ".join(inputs.strip().split() )
        else:
            A__ : Optional[Any] =inputs
        A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            # Decompose and drop combining marks (accent stripping).
            A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ )
            A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
        if self.do_lower_case:
            A__ : str =outputs.lower()
        return outputs

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
        """Tokenize text with SentencePiece, re-splitting digit/comma tails (XLNet-style)."""
        A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ )
        A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
        A__ : List[str] =[]
        for piece in pieces:
            # Split pieces like "2," into "2" + "," so digits tokenize consistently.
            if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                A__ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        A__ : Union[str, Any] =cur_pieces[1:]
                    else:
                        A__ : List[str] =cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(UpperCamelCase__ )
            else:
                new_pieces.append(UpperCamelCase__ )
        return new_pieces

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
        """Convert a token (str) to its vocabulary id."""
        return self.sp_model.PieceToId(UpperCamelCase__ )

    def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
        """Convert a vocabulary id back to its token (str)."""
        return self.sp_model.IdToPiece(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
        """Join tokens and replace the SentencePiece underline with spaces."""
        A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
        return out_string

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """Build model inputs in XLNet format: A <sep> <cls> or A <sep> B <sep> <cls>."""
        A__ : List[str] =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        """Return a mask (1 = special token) for sequences built by this tokenizer."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
        if token_ids_a is not None:
            return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
        return ([0] * len(UpperCamelCase__ )) + [1, 1]

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """Create token-type ids; segment 2 marks the trailing <cls> (XLNet convention)."""
        A__ : List[str] =[self.sep_token_id]
        A__ : Optional[Any] =[2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        """Save the SentencePiece model to `save_directory`; returns the written path tuple."""
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : Tuple =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk: serialize the in-memory model instead.
            with open(UpperCamelCase__ , "wb" ) as fi:
                A__ : Optional[Any] =self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__ )
        return (out_vocab_file,)

    def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
        """Decode ids, then undo the space/newline placeholder mapping from __init__."""
        A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ )
        A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
| 656 | 0 |
def __UpperCAmelCase ( a : int , n : int , mod : int ) -> int:
    """Compute ``(a ** n) % mod`` by recursive binary exponentiation.

    Args:
        a: base.
        n: non-negative integer exponent.
        mod: modulus (> 0).

    Returns:
        ``(a ** n) % mod`` computed in O(log n) multiplications.

    Fixes vs. original: the three parameters all shared the name ``__a``
    (a SyntaxError), the recursion targeted the undefined name
    ``binary_exponentiation``, and the even branch recursed on ``n / 2``
    (float division) — now integer ``n // 2``.
    """
    if n == 0:
        return 1
    elif n % 2 == 1:
        # Odd exponent: peel off one factor of a.
        return (__UpperCAmelCase(a , n - 1 , mod ) * a) % mod
    else:
        # Even exponent: square the half-power.
        _a : int = __UpperCAmelCase(a , n // 2 , mod )
        return (_a * _a) % mod
# a prime number
p = 701
a = 1000000000
b = 10

# Demonstrate modular division via Fermat's little theorem:
# b^(p-2) mod p is the modular inverse of b when p is prime.
# Fixes vs. original: the three constants were all assigned to the same
# name ``a__`` (clobbering each other) and the comparison called the
# undefined name ``binary_exponentiation`` — it now calls the function
# defined above.
# NOTE(review): ``(a / b) % p`` uses float division, so these comparisons
# mix float and modular arithmetic as in the original — kept as-is.
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * __UpperCAmelCase(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 14 | """simple docstring"""
def lowercase ( n : int , array : list[int] , target : int ) -> int:
    """Count ordered combinations of elements of ``array`` that sum to ``target``.

    Plain exponential recursion (no memoization). ``n`` is accepted for
    signature parity with the DP variants below but is not needed here.

    Fix vs. original: the three parameters all shared the name
    ``UpperCamelCase``, which is a SyntaxError in Python.

    Args:
        n: length of ``array`` (unused).
        array: positive integers that may be reused any number of times.
        target: the sum to reach.

    Returns:
        Number of ordered sequences drawn from ``array`` summing to ``target``.
    """

    def count_of_possible_combinations(target : int ) -> int:
        # Overshot: no valid combination down this branch.
        if target < 0:
            return 0
        # Exact hit: exactly one way (the empty suffix).
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )

    return count_of_possible_combinations(target )
def lowercase ( n : int , array : list[int] , target : int ) -> int:
    """Count ordered combinations summing to ``target`` using top-down memoization.

    Same result as the plain recursive version, but each sub-target is
    computed once and cached in ``dp_array`` (``-1`` marks "not yet computed").
    ``n`` is accepted for signature parity and is unused.

    Fix vs. original: duplicate parameter names (``UpperCamelCase`` three
    times) were a SyntaxError; inner bindings now use the names the body reads.
    """

    def count_of_possible_combinations_with_dp_array(
        target : int , dp_array : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        # Return the cached count when this sub-target was already solved.
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def lowercase ( n : int , array : list[int] , target : int ) -> int:
    """Count ordered combinations summing to ``target`` with bottom-up DP.

    ``dp_array[i]`` holds the number of ordered sequences of elements of
    ``array`` summing to ``i``; the answer is ``dp_array[target]``.

    Fix vs. original: the three parameters all shared the name
    ``UpperCamelCase`` (a SyntaxError); the inner loop iterates the first
    ``n`` indices of ``array`` as the body already did.

    Args:
        n: number of usable elements at the front of ``array``.
        array: positive integers that may be reused any number of times.
        target: the sum to reach.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: the empty sequence
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[Any] = 3
__A : Optional[Any] = 5
__A : int = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 656 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A ( UpperCAmelCase__ ):
    """Builds tiny SqueezeBert configs/inputs and checks each head's output shapes.

    NOTE(review): an automated rename turned every ``self.<attr> = ...`` /
    ``model = ...`` binding into a bare ``lowercase__ = ...`` while later code
    reads the descriptive names (``self.batch_size``, ``result.logits``, ...);
    the original assignment targets need restoring upstream.
    """

    def __init__(self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Any=99 , _UpperCAmelCase : Optional[Any]=32 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Optional[Any]=64 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[Any]=512 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : str=4 , _UpperCAmelCase : Tuple=1 , ) -> List[Any]:
        """Store every hyper-parameter of the tiny test model."""
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = seq_length
        lowercase__ = is_training
        lowercase__ = use_input_mask
        lowercase__ = use_token_type_ids
        lowercase__ = use_labels
        lowercase__ = vocab_size
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = max_position_embeddings
        lowercase__ = type_vocab_size
        lowercase__ = type_sequence_label_size
        lowercase__ = initializer_range
        lowercase__ = num_labels
        lowercase__ = num_choices
        lowercase__ = scope
        # SqueezeBert-specific grouped-convolution hyper-parameters.
        lowercase__ = q_groups
        lowercase__ = k_groups
        lowercase__ = v_groups
        lowercase__ = post_attention_groups
        lowercase__ = intermediate_groups
        lowercase__ = output_groups

    def lowerCamelCase__ (self : Optional[Any] ) -> Union[str, Any]:
        """Create random input ids, optional mask, labels, and a config."""
        lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase__ = None
        if self.use_input_mask:
            lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase__ = None
        lowercase__ = None
        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowercase__ = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
        """Build a SqueezeBertConfig from the stored hyper-parameters."""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )

    def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> List[Any]:
        """Check the base model's last hidden state shape."""
        lowercase__ = SqueezeBertModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        lowercase__ = model(_UpperCAmelCase , _UpperCAmelCase )
        lowercase__ = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Tuple:
        """Check the masked-LM head's logits shape."""
        lowercase__ = SqueezeBertForMaskedLM(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCamelCase__ (self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> Union[str, Any]:
        """Check the QA head's start/end logits shapes."""
        lowercase__ = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        lowercase__ = model(
            _UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] ) -> List[str]:
        """Check the sequence-classification head's logits shape."""
        lowercase__ = self.num_labels
        lowercase__ = SqueezeBertForSequenceClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple ) -> List[Any]:
        """Check the token-classification head's logits shape."""
        lowercase__ = self.num_labels
        lowercase__ = SqueezeBertForTokenClassification(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> List[Any]:
        """Check the multiple-choice head's logits shape (inputs tiled per choice)."""
        lowercase__ = self.num_choices
        lowercase__ = SqueezeBertForMultipleChoice(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowercase__ = model(
            _UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCamelCase__ (self : str ) -> str:
        """Return (config, inputs_dict) for the common model tests."""
        lowercase__ = self.prepare_config_and_inputs()
        ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
        lowercase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common + pipeline test suite for all SqueezeBert model classes."""

    # Model classes exercised by the common tests.
    A__ = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # Pipeline-task -> model-class mapping for pipeline tests.
    A__ = (
        {
            '''feature-extraction''': SqueezeBertModel,
            '''fill-mask''': SqueezeBertForMaskedLM,
            '''question-answering''': SqueezeBertForQuestionAnswering,
            '''text-classification''': SqueezeBertForSequenceClassification,
            '''token-classification''': SqueezeBertForTokenClassification,
            '''zero-shot''': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): these three flags (and the class attributes above) were all
    # renamed to `A__`, so the original attribute names (e.g. all_model_classes,
    # pipeline_model_mapping, test_pruning, ...) are lost — confirm upstream.
    A__ = False
    A__ = True
    A__ = False

    def lowerCamelCase__ (self : Optional[Any] ) -> List[Any]:
        """Set up the model tester and the config tester."""
        lowercase__ = SqueezeBertModelTester(self )
        lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 )

    def lowerCamelCase__ (self : Any ) -> Any:
        """Run the generic config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCamelCase__ (self : Union[str, Any] ) -> Optional[int]:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase )

    def lowerCamelCase__ (self : Optional[Any] ) -> int:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase )

    def lowerCamelCase__ (self : str ) -> Any:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase )

    def lowerCamelCase__ (self : str ) -> Tuple:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase )

    def lowerCamelCase__ (self : Optional[Any] ) -> Optional[int]:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase )

    def lowerCamelCase__ (self : Tuple ) -> str:
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase )

    @slow
    def lowerCamelCase__ (self : Optional[Any] ) -> List[str]:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = SqueezeBertModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class A ( unittest.TestCase ):
    """Integration test: check classification logits of the pretrained MNLI checkpoint."""

    @slow
    def lowerCamelCase__ (self : int ) -> Dict:
        lowercase__ = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
        # A fixed tokenized input sentence.
        lowercase__ = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
        lowercase__ = model(_UpperCAmelCase )[0]
        # Three MNLI classes -> logits of shape (1, 3).
        lowercase__ = torch.Size((1, 3) )
        self.assertEqual(output.shape , _UpperCAmelCase )
        # Reference logits recorded from the released checkpoint.
        lowercase__ = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
        self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4 ) )
| 15 | """simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase ( UpperCamelCase : Optional[Any] ):
    """Exact GELU activation computed via the error function: x * Phi(x).

    NOTE(review): assignments target `A__` while the expressions read `x`/`cdf`
    (automated renaming artifact); the original bindings need restoring upstream.
    """
    A__ : List[Any] =tf.convert_to_tensor(UpperCamelCase )
    A__ : List[Any] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def lowercase ( UpperCamelCase : Optional[int] ):
    """Approximate GELU (tanh formulation): 0.5x(1 + tanh(sqrt(2/pi)(x + 0.044715x^3))).

    NOTE(review): assignments target `A__` while expressions read `x`/`pi`/`coeff`/`cdf`
    (renaming artifact) — confirm original bindings upstream.
    """
    A__ : Optional[Any] =tf.convert_to_tensor(UpperCamelCase )
    A__ : Tuple =tf.cast(math.pi , x.dtype )
    A__ : Dict =tf.cast(0.04_47_15 , x.dtype )
    A__ : Optional[int] =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCamelCase , 3 )) ))
    return x * cdf
def lowercase ( UpperCamelCase : Optional[int] ):
    """Mish activation: x * tanh(softplus(x))."""
    A__ : List[str] =tf.convert_to_tensor(UpperCamelCase )
    # NOTE(review): the converted tensor is bound to `A__` but the return reads
    # `x` — renaming artifact; confirm upstream.
    return x * tf.tanh(tf.math.softplus(UpperCamelCase ) )
def lowercase ( UpperCamelCase : List[str] ):
    """Fast GELU approximation using precomputed tanh coefficients.

    NOTE(review): assignments target `A__` while expressions read `x`/`coeffa`
    (renaming artifact) — confirm original bindings upstream.
    """
    A__ : Union[str, Any] =tf.convert_to_tensor(UpperCamelCase )
    A__ : List[Any] =tf.cast(0.04_47_15 , x.dtype )
    A__ : List[Any] =tf.cast(0.79_78_84_56_08 , x.dtype )  # sqrt(2/pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowercase ( UpperCamelCase : List[Any] ):
    """Quick GELU approximation: x * sigmoid(1.702 * x)."""
    A__ : List[str] =tf.convert_to_tensor(UpperCamelCase )
    A__ : str =tf.cast(1.7_02 , x.dtype )
    # NOTE(review): reads `x`/`coeff` but bindings are named `A__` — renaming artifact.
    return x * tf.math.sigmoid(coeff * x )
def lowercase ( UpperCamelCase : Tuple ):
    """GELU clipped to [-10, 10] (avoids fp16 overflow in downstream ops).

    NOTE(review): calls `_gelu`, which is not defined under that name in this
    mangled file — confirm upstream.
    """
    return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 )
def lowercase ( UpperCamelCase : str , UpperCamelCase : Any=-1 ):
    """Gated Linear Unit: split the tensor in two along `axis`, gate one half with sigmoid.

    NOTE(review): both halves of the split are bound to the same name `A__` and the
    return reads an undefined `a` — renaming artifact; confirm original bindings upstream.
    """
    A__ , A__ : Optional[Any] =tf.split(UpperCamelCase , 2 , axis=UpperCamelCase )
    return a * tf.math.sigmoid(UpperCamelCase )
# Select GELU implementations: Keras' native gelu on TF >= 2.4, hand-rolled otherwise.
# NOTE(review): every assignment below targets the same name `__A` (renaming
# artifact), and the dict reads names (`gelu`, `gelu_aa`, `gelu_fast`, ...,
# `approximate_gelu_wrap`, `_gelu`, `_gelu_new`) that are never defined under
# those names in this mangled file — the original bindings need restoring upstream.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def lowercase ( UpperCamelCase : int ):
        """Wrapper calling Keras gelu with the approximate (tanh) variant.

        NOTE(review): `approximate=UpperCamelCase` passes the input tensor as the
        flag — presumably this should be `approximate=True`; confirm upstream.
        """
        return tf.keras.activations.gelu(UpperCamelCase , approximate=UpperCamelCase )

    __A : Optional[Any] = tf.keras.activations.gelu
    __A : Optional[Any] = approximate_gelu_wrap
else:
    __A : Any = _gelu
    __A : Union[str, Any] = _gelu_new

# String name -> activation function lookup table.
__A : List[str] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase ( UpperCamelCase : List[Any] ):
    """Look up an activation function by name; raise KeyError for unknown names.

    NOTE(review): reads `activation_string` and `ACTaFN` rather than the parameter
    and dict names actually defined in this mangled file — confirm upstream.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 656 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the OwlViT model package.
#
# Fixes vs. original: (1) the structure dict and every optional extension were
# all assigned to the same name `__A`, so each assignment clobbered the dict;
# extensions are now added as keys of the single dict. (2) `_LazyModule` was
# called with the undefined name `_import_structure`. (3) its result was bound
# to a dead variable instead of being installed as the module object — it is
# now assigned to `sys.modules[__name__]`, as the lazy-module pattern requires.
__A : Dict = {
    'configuration_owlvit': [
        'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'OwlViTConfig',
        'OwlViTOnnxConfig',
        'OwlViTTextConfig',
        'OwlViTVisionConfig',
    ],
    'processing_owlvit': ['OwlViTProcessor'],
}

# Vision-only submodules are registered only when the vision deps are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    __A['image_processing_owlvit'] = ['OwlViTImageProcessor']

# Torch-only submodules are registered only when torch is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A['modeling_owlvit'] = [
        'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OwlViTModel',
        'OwlViTPreTrainedModel',
        'OwlViTTextModel',
        'OwlViTVisionModel',
        'OwlViTForObjectDetection',
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], __A, module_spec=__spec__)
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( _UpperCamelCase):
    """Config tester mixin asserting SegformerConfig exposes its required attributes."""

    def _UpperCAmelCase ( self : Dict ):
        A__ : Optional[Any] =self.config_class(**self.inputs_dict )
        # NOTE(review): the constructed config is bound to `A__` but the hasattr
        # checks receive `UpperCamelCase__` (renaming artifact) — confirm upstream.
        self.parent.assertTrue(hasattr(UpperCamelCase__ , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_attention_heads" ) )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_encoder_blocks" ) )
class __lowerCAmelCase :
    """Test harness that fabricates tiny SegFormer configs and dummy image/label tensors.

    NOTE(review): the repeated ``A__ = <arg>`` statements below look like
    name-mangled ``self.<attr> = <arg>`` assignments (the other methods read
    ``self.batch_size`` and friends) — confirm against the upstream test file.
    The duplicated ``UpperCamelCase__`` parameter names in ``__init__`` are a
    SyntaxError as written; kept byte-identical here.
    """

    def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=[2, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[8, 4, 2, 1] , UpperCamelCase__ : Tuple=[16, 32, 64, 128] , UpperCamelCase__ : Optional[int]=[1, 4, 8, 16] , UpperCamelCase__ : Any=[1, 2, 4, 8] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=None , ):
        # Hyper-parameters for the tiny test model.
        A__ : Tuple =parent
        A__ : List[Any] =batch_size
        A__ : List[Any] =image_size
        A__ : Union[str, Any] =num_channels
        A__ : Optional[int] =num_encoder_blocks
        A__ : Any =sr_ratios
        A__ : Any =depths
        A__ : List[Any] =hidden_sizes
        A__ : List[Any] =downsampling_rates
        A__ : List[str] =num_attention_heads
        A__ : int =is_training
        A__ : List[Any] =use_labels
        A__ : Any =hidden_act
        A__ : Dict =hidden_dropout_prob
        A__ : int =attention_probs_dropout_prob
        A__ : List[Any] =initializer_range
        A__ : Tuple =num_labels
        A__ : List[Any] =scope

    def _UpperCAmelCase ( self : Optional[int] ):
        """Build (config, pixel_values, labels) for one forward pass."""
        A__ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ : Any =None
        if self.use_labels:
            # Per-pixel class ids for the semantic-segmentation head.
            A__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        A__ : List[Any] =self.get_config()
        return config, pixel_values, labels

    def _UpperCAmelCase ( self : Tuple ):
        """Return a tiny SegFormer config mirroring the tester's hyper-parameters."""
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
        """Forward the bare SegformerModel and check the last hidden-state shape."""
        A__ : Any =SegformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : Dict =model(UpperCamelCase__ )
        # Final feature map is downsampled by the last stage's rate times two.
        A__ : Optional[int] =self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
        """Check the semantic-segmentation head: logits shape and a positive loss."""
        A__ : str =self.num_labels
        A__ : Optional[Any] =SegformerForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : Optional[Any] =model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        A__ : List[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
        """Binary (single-label) segmentation: the loss must still be positive."""
        A__ : Tuple =1
        A__ : Tuple =SegformerForSemanticSegmentation(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # randint(0, 1) yields all-zero labels, i.e. the single-class case.
        A__ : List[str] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase__ )
        A__ : Dict =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertGreater(result.loss , 0.0 )

    def _UpperCAmelCase ( self : str ):
        """Adapter for the common test mixin: return (config, inputs_dict)."""
        A__ : Union[str, Any] =self.prepare_config_and_inputs()
        A__ , A__ , A__ : Tuple =config_and_inputs
        A__ : Tuple ={"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    """Common-API test suite for the SegFormer model classes.

    NOTE(review): the repeated ``__magic_name__`` class attributes below look
    like distinct upstream names (``all_model_classes``,
    ``pipeline_model_mapping``, boolean feature flags, ...) collapsed by
    name-mangling — later bindings shadow earlier ones. Kept byte-identical.
    """

    __magic_name__ : Dict = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    __magic_name__ : Optional[int] = (
        {
            """feature-extraction""": SegformerModel,
            """image-classification""": SegformerForImageClassification,
            """image-segmentation""": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ : Dict = True
    __magic_name__ : List[str] = False
    __magic_name__ : Optional[Any] = False
    __magic_name__ : str = False

    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Instantiate the model/config testers used by all tests below.
        A__ : Union[str, Any] =SegformerModelTester(self )
        A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ )

    def _UpperCAmelCase ( self : str ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self : Dict ):
        # Shape check of the bare model forward pass.
        A__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _UpperCAmelCase ( self : Tuple ):
        # Single-class segmentation still produces a positive loss.
        A__ : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Multi-class segmentation head shapes and loss.
        A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ )

    @unittest.skip("SegFormer does not use inputs_embeds" )
    def _UpperCAmelCase ( self : Dict ):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def _UpperCAmelCase ( self : Tuple ):
        pass

    def _UpperCAmelCase ( self : List[str] ):
        # The first positional forward argument must be `pixel_values`.
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : int =model_class(UpperCamelCase__ )
            A__ : Optional[int] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : Optional[int] =[*signature.parameters.keys()]
            A__ : List[str] =["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def _UpperCAmelCase ( self : str ):
        # Attention outputs: count, shapes of first/last layer, and ordering.
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Union[str, Any] =True
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            A__ : Union[str, Any] =False
            A__ : str =True
            A__ : Optional[int] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Any =outputs.attentions
            # One attention tensor per transformer layer across all blocks.
            A__ : List[str] =sum(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A__ : Dict =True
            A__ : str =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Union[str, Any] =outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A__ : List[Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            A__ : Tuple =(self.model_tester.image_size // 32) ** 2
            A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            A__ : int =len(UpperCamelCase__ )
            # Check attention is always last and order is fine
            A__ : Optional[Any] =True
            A__ : Any =True
            A__ : Union[str, Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
            A__ : Optional[Any] =outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )

    def _UpperCAmelCase ( self : List[Any] ):
        # Hidden-state outputs: one per encoder block, first block shape checked.
        def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
            A__ : Optional[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Optional[Any] =outputs.hidden_states
            A__ : int =self.model_tester.num_encoder_blocks
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ : str =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[int] ):
        # Training smoke test: every trainable model class must backprop a loss.
        if not self.model_tester.is_training:
            return
        A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : List[Any] =True
        for model_class in self.all_model_classes:
            # Skip bare models that produce no loss.
            if model_class in get_values(UpperCamelCase__ ):
                continue
            A__ : List[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.train()
            A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _UpperCAmelCase ( self : Tuple ):
        pass

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # Loading the first published checkpoint must succeed.
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def lowercase ( ):
    """Load the standard COCO cats fixture image used by the slow integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Slow integration tests running real pretrained SegFormer checkpoints on a fixture image.

    NOTE(review): the boolean-looking kwargs passed as ``UpperCamelCase__``
    below (e.g. ``keep_ratio`` / ``align`` / ``do_random_crop``) reference a
    name that is undefined in this mangled copy — upstream they are ``False``;
    confirm before running.
    """

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # only resize + normalize
        A__ : List[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : Union[str, Any] =prepare_img()
        A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        # Reference logit slice recorded from the original implementation.
        A__ : Optional[int] =torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # only resize + normalize
        A__ : Dict =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : int =SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(UpperCamelCase__ )
        A__ : Tuple =prepare_img()
        A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        # Reference logit slice for the Cityscapes checkpoint (looser tolerance).
        A__ : List[Any] =torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) )

    @slow
    def _UpperCAmelCase ( self : int ):
        # Post-processing: semantic maps with and without target resizing.
        # only resize + normalize
        A__ : Optional[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : str =prepare_img()
        A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : Dict =model(UpperCamelCase__ )
        A__ : Any =outputs.logits.detach().cpu()
        # With target_sizes the map is resized to the requested resolution.
        A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
        A__ : List[str] =torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        # Without target_sizes the map keeps the model's native 128x128 output.
        A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        A__ : Tuple =torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 656 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __SCREAMING_SNAKE_CASE ( a__ : dict ,a__ : str ,a__ : set ,a__ : set ,a__ : dict ,a__ : dict ,a__ : PriorityQueue ,a__ : dict ,a__ : float | int ,) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__A : Tuple = cst_fwd.get(a__ ,np.inf )
__A : int = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__A : List[Any] = new_cost_f
__A : Any = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__A : Optional[Any] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : dict ,a__ : dict ) -> int:
__A : int = -1
__A : List[str] = set()
__A : List[Any] = set()
__A : str = {source: 0}
__A : List[str] = {destination: 0}
__A : int = {source: None}
__A : Tuple = {destination: None}
__A : PriorityQueue[Any] = PriorityQueue()
__A : PriorityQueue[Any] = PriorityQueue()
__A : str = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
__A , __A : Union[str, Any] = queue_forward.get()
visited_forward.add(a__ )
__A , __A : Optional[Any] = queue_backward.get()
visited_backward.add(a__ )
__A : int = pass_and_relaxation(
a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,)
__A : List[Any] = pass_and_relaxation(
a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,)
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
__A : Optional[int] = shortest_distance
return shortest_path_distance
# Forward adjacency list: node -> [[neighbour, edge_weight], ...].
UpperCAmelCase_ : Union[str, Any] = {
    '''B''': [['''C''', 1]],
    '''C''': [['''D''', 1]],
    '''D''': [['''F''', 1]],
    '''E''': [['''B''', 1], ['''G''', 2]],
    '''F''': [],
    '''G''': [['''F''', 1]],
}
# Backward (reversed-edge) adjacency list used by the backward search.
# NOTE(review): the [None, np.inf] entry for 'E' marks a node with no real
# incoming edge — kept as in the original.
UpperCAmelCase_ : Any = {
    '''B''': [['''E''', 1]],
    '''C''': [['''B''', 1]],
    '''D''': [['''C''', 1]],
    '''F''': [['''D''', 1], ['''G''', 1]],
    '''E''': [[None, np.inf]],
    '''G''': [['''E''', 2]],
}
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 17 | """simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase):
    """Builds small FlaxRobertaPreLayerNorm configs and dummy inputs for the tests below.

    NOTE(review): the ``A__ = <arg>`` lines look like name-mangled
    ``self.<attr> = <arg>`` assignments (later methods read ``self.batch_size``
    etc.), and the duplicated ``UpperCamelCase__`` parameters in ``__init__``
    are a SyntaxError as written — confirm against the upstream test file.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ):
        # Hyper-parameters for the tiny test model.
        A__ : str =parent
        A__ : List[str] =batch_size
        A__ : Any =seq_length
        A__ : List[str] =is_training
        A__ : List[Any] =use_attention_mask
        A__ : List[Any] =use_token_type_ids
        A__ : Dict =use_labels
        A__ : List[Any] =vocab_size
        A__ : Optional[int] =hidden_size
        A__ : Optional[Any] =num_hidden_layers
        A__ : str =num_attention_heads
        A__ : int =intermediate_size
        A__ : Tuple =hidden_act
        A__ : Tuple =hidden_dropout_prob
        A__ : Dict =attention_probs_dropout_prob
        A__ : Any =max_position_embeddings
        A__ : Any =type_vocab_size
        A__ : Union[str, Any] =type_sequence_label_size
        A__ : Optional[Any] =initializer_range
        A__ : int =num_choices

    def _UpperCAmelCase ( self : Tuple ):
        """Build (config, input_ids, token_type_ids, attention_mask) for one pass."""
        A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : List[str] =None
        if self.use_attention_mask:
            A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
        A__ : str =None
        if self.use_token_type_ids:
            A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        A__ : Any =RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _UpperCAmelCase ( self : Tuple ):
        """Adapter for the common Flax test mixin: return (config, inputs_dict)."""
        A__ : Dict =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : str =config_and_inputs
        A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def _UpperCAmelCase ( self : int ):
        """Decoder variant: additionally fabricate encoder hidden states and mask."""
        A__ : str =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs
        A__ : Union[str, Any] =True
        A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    """Common-API smoke tests for the Flax RoBERTa-PreLayerNorm model family.

    NOTE(review): the two ``__magic_name__`` attributes are mangled upstream
    names (a boolean feature flag and ``all_model_classes``); the second does
    not shadow the first only by accident of mangling — confirm upstream.
    """

    __magic_name__ : Union[str, Any] = True
    __magic_name__ : Dict = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _UpperCAmelCase ( self : Optional[int] ):
        # Instantiate the shared model tester.
        A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        # Every model class must load from the PyTorch checkpoint and run.
        for model_class_name in self.all_model_classes:
            A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
            A__ : Union[str, Any] =model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase):
    """Slow integration tests comparing Flax outputs against recorded reference slices."""

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # Masked-LM head: output shape and a 3x3x3 logit slice.
        A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        # NOTE(review): `jnp.intaa` / `np.floataa` are mangled dtype names
        # (likely int64 / float32 upstream) — kept byte-identical.
        A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : str =model(UpperCamelCase__ )[0]
        A__ : List[Any] =[1, 11, 50265]
        self.assertEqual(list(output.shape ) , UpperCamelCase__ )
        # compare the actual values for a slice.
        A__ : Any =np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        # Base model: a 3x3x3 hidden-state slice against recorded values.
        A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : Dict =model(UpperCamelCase__ )[0]
        # compare the actual values for a slice.
        A__ : Optional[Any] =np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 656 | 0 |
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
_lowerCAmelCase = n - k
# Calculate C(n,k)
for i in range(SCREAMING_SNAKE_CASE_ ):
result *= n - i
result //= i + 1
return result
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , SCREAMING_SNAKE_CASE_ ) // (node_count + 1)
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
if n < 0:
raise ValueError("factorial() not defined for negative values" )
_lowerCAmelCase = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
return catalan_number(SCREAMING_SNAKE_CASE_ ) * factorial(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    # Bug fix: the parsed input was bound to a mangled throwaway name while the
    # lines below read `node_count` (NameError at runtime).
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
        f"""binary trees and {catalan_number(node_count)} binary search trees."""
    )
| 18 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)

# (old_prefix, new_prefix) pairs applied to every key when converting an
# original VisualBERT checkpoint to the Transformers naming scheme.
# Bug fix: all three constants below were bound to the single mangled name
# `__A`, shadowing each other, while the conversion code reads
# `rename_keys_prefix` and `ACCEPTABLE_CHECKPOINTS` (NameError). The
# descriptive names are restored; the `__A` bindings are kept so the module's
# final `__A` value is unchanged.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
__A : Any = rename_keys_prefix

# Checkpoint filenames this conversion script knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
__A : Optional[int] = ACCEPTABLE_CHECKPOINTS
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : Union[str, Any] =torch.load(UpperCamelCase , map_location="cpu" )
return sd
def lowercase ( d , config , rename_keys_prefix=None ):
    """Translate an original VisualBERT state dict into the Transformers naming scheme.

    Args:
        d: raw checkpoint state dict.
        config: a ``VisualBertConfig`` (only ``max_position_embeddings`` is read).
        rename_keys_prefix: (old_prefix, new_prefix) pairs; defaults to the
            standard VisualBERT rename table.

    Returns:
        An ``OrderedDict`` with detector weights dropped and keys renamed.

    Bug fix: the original signature declared three parameters with the same
    mangled name (a SyntaxError) and its default referenced an undefined
    global; the default table is inlined behind a ``None`` sentinel instead.
    """
    if rename_keys_prefix is None:
        rename_keys_prefix = [
            ("bert.bert", "visual_bert"),
            ("bert.cls", "cls"),
            ("bert.classifier", "cls"),
            ("token_type_embeddings_visual", "visual_token_type_embeddings"),
            ("position_embeddings_visual", "visual_position_embeddings"),
            ("projection", "visual_projection"),
        ]
    new_d = OrderedDict()
    # NOTE(review): destination key reconstructed from the upstream conversion
    # script (the mangled copy discarded the assignment target) — verify.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


# Descriptive alias used by the conversion routine below.
get_new_dict = lowercase
@torch.no_grad()
def lowercase ( checkpoint_path , pytorch_dump_folder_path ):
    """Convert an original VisualBERT ``.th`` checkpoint into a Transformers model and save it.

    Args:
        checkpoint_path: path to one of the known ``.th`` checkpoints.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.

    Bug fix: the original signature declared two parameters with the same
    mangled name (a SyntaxError) while the body read ``checkpoint_path``;
    the intended parameter names are restored.
    """
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''

    # Get Config: infer model type and visual-embedding size from the filename.
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params )

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )

    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )


# Descriptive alias used by the `__main__` entry point below.
convert_visual_bert_checkpoint = lowercase
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were bound to a mangled throwaway
    # name while the following lines read `parser`/`args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 656 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)

# Map of pretrained SEW-D checkpoints to their hosted config files.
# NOTE(review): this rebinding of `_a` shadows the logger above — in the
# upstream file these are two distinct constant names collapsed by
# name-mangling; confirm before relying on either binding.
_a = {
    """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCAmelCase( lowerCamelCase ):
    """Configuration class for SEW-D models.

    NOTE(review): the ``_UpperCamelCase = <arg>`` statements below look like
    name-mangled ``self.<attr> = <arg>`` assignments, and the duplicated
    ``__a`` parameters in ``__init__`` are a SyntaxError as written — kept
    byte-identical; confirm against the upstream config file.
    """
    lowercase__ = 'sew-d'  # model_type identifier

    def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a=2 , __a=5_12 , __a=2_56 , __a=True , __a=True , __a=("p2c", "c2p") , __a="layer_norm" , __a="gelu_python" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.02 , __a=1e-7 , __a=1e-5 , __a="group" , __a="gelu" , __a=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a=False , __a=1_28 , __a=16 , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=0 , __a=1 , __a=2 , **__a , ) -> int:
        """Store model hyper-parameters and validate the convolutional layout."""
        super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a)
        # Transformer / feature-extractor hyper-parameters.
        _UpperCamelCase = hidden_size
        _UpperCamelCase = feat_extract_norm
        _UpperCamelCase = feat_extract_activation
        _UpperCamelCase = list(__a)
        _UpperCamelCase = list(__a)
        _UpperCamelCase = list(__a)
        _UpperCamelCase = conv_bias
        _UpperCamelCase = num_conv_pos_embeddings
        _UpperCamelCase = num_conv_pos_embedding_groups
        _UpperCamelCase = len(self.conv_dim)
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = squeeze_factor
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = position_buckets
        _UpperCamelCase = share_att_key
        _UpperCamelCase = relative_attention
        _UpperCamelCase = norm_rel_ebd
        _UpperCamelCase = list(__a)
        _UpperCamelCase = hidden_act
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = hidden_dropout
        _UpperCamelCase = attention_dropout
        _UpperCamelCase = activation_dropout
        _UpperCamelCase = feat_proj_dropout
        _UpperCamelCase = final_dropout
        _UpperCamelCase = layer_norm_eps
        _UpperCamelCase = feature_layer_norm_eps
        _UpperCamelCase = initializer_range
        _UpperCamelCase = vocab_size

        # The three conv layout tuples must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _UpperCamelCase = apply_spec_augment
        _UpperCamelCase = mask_time_prob
        _UpperCamelCase = mask_time_length
        _UpperCamelCase = mask_time_min_masks
        _UpperCamelCase = mask_feature_prob
        _UpperCamelCase = mask_feature_length
        _UpperCamelCase = mask_feature_min_masks

        # ctc loss
        _UpperCamelCase = ctc_loss_reduction
        _UpperCamelCase = ctc_zero_infinity

        # sequence classification
        _UpperCamelCase = use_weighted_layer_sum
        _UpperCamelCase = classifier_proj_size

    @property
    def UpperCAmelCase ( self) -> int:
        """Total downsampling stride of the convolutional feature extractor."""
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 19 | """simple docstring"""
# Pre-computed fifth powers of the decimal digits, keyed by digit character.
# Bug fix: this table and both functions were bound to mangled names while
# the code referenced `DIGITS_FIFTH_POWER`, `digits_fifth_powers_sum` and
# `solution` (NameError everywhere). The descriptive names are restored and
# the original bindings kept for backward compatibility.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
__A = DIGITS_FIFTH_POWER


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler #30: sum of all numbers equal to the sum of the fifth powers of their digits.

    Numbers below 1000 are excluded as trivial, and 6 * 9**5 < 10**6 bounds
    the search from above, so scanning [1000, 1000000) is sufficient.
    """
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


# The last mangled definition was `solution`; preserve that binding.
lowercase = solution
if __name__ == "__main__":
    # Print the Project Euler #30 answer when run as a script.
    print(solution())
| 656 | 0 |
def _lowercase( __a : int ):
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
a__ =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
a__ =1
if upper_limit > 0:
a__ =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
_lowerCAmelCase: str = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 20 | """simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__A : Optional[Any] = logging.get_logger(__name__)
# NOTE(review): every module constant below is bound to the same name ``__A``,
# so each assignment clobbers the previous one and only the final checkpoint
# list survives. The docstring decorators further down expect distinct names
# (_CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, _EXPECTED_OUTPUT_SHAPE,
# _IMAGE_CLASS_CHECKPOINT, _IMAGE_CLASS_EXPECTED_OUTPUT) — TODO restore them.
# General docstring
__A : str = "PoolFormerConfig"
# Base docstring
__A : Optional[Any] = "sail/poolformer_s12"
__A : List[Any] = [1, 512, 7, 7]
# Image classification docstring
__A : List[str] = "sail/poolformer_s12"
__A : Tuple = "tabby, tabby cat"
__A : Tuple = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowercase(input, drop_prob: float = 0.0, training: bool = False):
    """
    Drop paths (Stochastic Depth) per sample, applied in the main path of
    residual blocks.

    Keeps each sample with probability ``1 - drop_prob`` and rescales kept
    samples by ``1 / keep_prob`` so the expected value is unchanged. A no-op
    when ``drop_prob`` is 0 or when not training.

    (The obfuscated original declared three parameters with the same name,
    which is not valid Python; the upstream names are restored here.)
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # Per-sample mask of shape (batch, 1, 1, ...) — broadcasts over all
    # trailing dimensions, so this works for any tensor rank, not just 2D convs.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


# The DropPath module below calls this helper as ``drop_path``; restore that
# public name so the call resolves.
drop_path = lowercase
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, for the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        # The obfuscated original bound this to a dead local, leaving
        # ``self.drop_prob`` unset and breaking forward() and extra_repr().
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # ``drop_path`` is the module-level stochastic-depth helper defined above.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings: a strided 2-d convolution projection followed
    by an optional normalization layer.
    """

    # NOTE(review): the encoder instantiates this class with keyword arguments
    # only, so the positional order below is not load-bearing; confirm against
    # upstream if positional callers exist.
    def __init__(self, patch_size, stride, padding, num_channels, hidden_size, norm_layer=None):
        super().__init__()
        # Accept either an int or an iterable for each geometry argument.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
        # ``nn.Convad`` does not exist — the 2-d convolution was intended.
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with exactly 1 group (LayerNorm over the channel dim).

    Input: tensor of shape (batch_size, num_channels, height, width).
    """

    def __init__(self, num_channels: int, **kwargs) -> None:
        # The class was renamed back from the obfuscated ``__lowerCAmelCase``;
        # the layer/pooler blocks below reference it by this name.
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """Pooling-based token mixer: average pooling minus the identity."""

    def __init__(self, pool_size: int) -> None:
        super().__init__()
        # ``nn.AvgPoolad`` does not exist — 2-d average pooling was intended.
        # count_include_pad=False (upstream value) keeps border averages
        # unbiased, so a constant input maps to exactly zero below.
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so the block models only the mixing residual.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """Channel MLP of a PoolFormer block: 1x1 conv -> activation -> 1x1 conv,
    with dropout applied after each convolution."""

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        # ``nn.Convad`` does not exist, and the obfuscated original bound both
        # convolutions to the same attribute; restore distinct conv1/conv2.
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # ``config.hidden_act`` is either the name of an activation in the
        # ACTaFN mapping (imported at the top of the file) or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class __lowerCAmelCase ( nn.Module):
    '''PoolFormer block: pooling token mixer + channel MLP, each behind a
    residual connection, with optional per-channel LayerScale.
    NOTE(review): automated renaming turned the original ``self.*`` attribute
    assignments into dead locals (``A__``), collapsed layer_scale_1 and
    layer_scale_2 into a single ``self.layer_scale_a`` read, and duplicated the
    ``UpperCamelCase__`` parameter names (not valid Python) — restore the
    upstream attribute and parameter names before use.'''
    def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
        super().__init__()
        # Token mixer, channel MLP, and the two pre-norms.
        A__ : Optional[int] =PoolFormerPooling(UpperCamelCase__ )
        A__ : List[str] =PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        A__ : int =PoolFormerGroupNorm(UpperCamelCase__ )
        A__ : int =PoolFormerGroupNorm(UpperCamelCase__ )
        # Useful for training neural nets
        A__ : Tuple =PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity()
        A__ : Optional[Any] =config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel residual scales (LayerScale).
            A__ : List[str] =nn.Parameter(
                config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )
            A__ : List[Any] =nn.Parameter(
                config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )
    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
        # forward(hidden_states): two residual sub-blocks, LayerScaled or not.
        if self.use_layer_scale:
            A__ : Optional[int] =self.pooling(self.before_norm(UpperCamelCase__ ) )
            A__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            A__ : Union[str, Any] =hidden_states + self.drop_path(UpperCamelCase__ )
            A__ : Tuple =()
            A__ : List[str] =self.output(self.after_norm(UpperCamelCase__ ) )
            A__ : Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            A__ : str =hidden_states + self.drop_path(UpperCamelCase__ )
            A__ : List[Any] =(output,) + outputs
            return outputs
        else:
            A__ : Tuple =self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) )
            # First residual connection
            A__ : Optional[Any] =pooling_output + hidden_states
            A__ : Tuple =()
            # Second residual connection inside the PoolFormerOutput block
            A__ : List[str] =self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) )
            A__ : Any =hidden_states + layer_output
            A__ : Tuple =(output,) + outputs
            return outputs
class __lowerCAmelCase ( nn.Module):
    '''PoolFormer encoder: per-stage patch embeddings followed by stacks of
    PoolFormer layers, with a stochastic-depth rate growing linearly with depth.
    NOTE(review): constructor results land in dead locals (``A__``) while
    forward() reads ``self.patch_embeddings`` / ``self.block`` — restore the
    attribute assignments from upstream before use.'''
    def __init__( self : Dict , UpperCamelCase__ : List[str] ):
        super().__init__()
        A__ : Tuple =config
        # stochastic depth decay rule
        A__ : Dict =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        A__ : Tuple =[]
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        A__ : List[str] =nn.ModuleList(UpperCamelCase__ )
        # Transformer blocks
        A__ : Union[str, Any] =[]
        A__ : Any =0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            A__ : Union[str, Any] =[]
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(UpperCamelCase__ ) )
        A__ : str =nn.ModuleList(UpperCamelCase__ )
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True ):
        # forward(pixel_values, output_hidden_states=False, return_dict=True):
        # embed per stage, then run that stage's layer stack.
        A__ : Union[str, Any] =() if output_hidden_states else None
        A__ : Dict =pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            A__ , A__ : List[Any] =layers
            # Get patch embeddings from hidden_states
            A__ : Any =embedding_layer(UpperCamelCase__ )
            # Send the embeddings through the blocks
            for _, blk in enumerate(UpperCamelCase__ ):
                A__ : List[str] =blk(UpperCamelCase__ )
                A__ : Tuple =layer_outputs[0]
            if output_hidden_states:
                A__ : List[Any] =all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ )
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models.
    """

    # NOTE(review): these four class attributes were all bound to the
    # obfuscated name ``__magic_name__``; the names below are the ones the
    # PreTrainedModel machinery actually looks up.
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize weights: normal for linear/conv, unit affine for LayerNorm."""
        # ``nn.Convad`` does not exist — nn.Conv2d was intended.
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the encoder submodule.
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
# NOTE(review): both docstring constants below are bound to the same name
# ``__A`` (the second clobbers the first); the upstream names are
# POOLFORMER_START_DOCSTRING and POOLFORMER_INPUTS_DOCSTRING — TODO restore.
__A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    '''Bare PoolFormer model (encoder only), returning raw hidden states.
    NOTE(review): the base class / decorator second argument are obfuscated
    (``_UpperCamelCase``; upstream: PoolFormerPreTrainedModel and the START
    docstring), constructor results land in dead locals while forward() reads
    ``self.encoder``, and the code-sample decorator references
    ``_CHECKPOINT_FOR_DOC`` / ``_CONFIG_FOR_DOC`` which the constants section
    above no longer defines — restore from upstream before use.'''
    def __init__( self : List[str] , UpperCamelCase__ : Dict ):
        super().__init__(UpperCamelCase__ )
        A__ : List[Any] =config
        A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ )
        # Initialize weights and apply final processing
        self.post_init()
    def _UpperCAmelCase ( self : Tuple ):
        # Accessor for the per-stage patch-embedding modules.
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        # forward(pixel_values, output_hidden_states=None, return_dict=None)
        A__ : int =(
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        A__ : List[Any] =self.encoder(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : int =encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module):
    """Final pooler: a single dense layer over the hidden dimension."""

    def __init__(self, config):
        super().__init__()
        # The obfuscated original bound the layer to a dead local while
        # forward() read ``self.dense``; restore the attribute assignment.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    '''PoolFormer with a linear classification head over the final hidden
    state pooled (mean) across spatial dimensions.
    NOTE(review): constructor results land in dead locals while forward()
    reads ``self.poolformer`` / ``self.norm`` / ``self.classifier`` /
    ``self.num_labels``, and the code-sample decorator references
    ``_IMAGE_CLASS_CHECKPOINT`` etc. that the constants section above no
    longer defines — restore from upstream before use.'''
    def __init__( self : Optional[Any] , UpperCamelCase__ : str ):
        super().__init__(UpperCamelCase__ )
        A__ : List[str] =config.num_labels
        A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ )
        # Final norm
        A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        A__ : Dict =(
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        # forward(pixel_values, labels=None, output_hidden_states=None, return_dict=None)
        A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict
        A__ : List[str] =self.poolformer(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : str =outputs[0]
        # Pool spatially (mean over H, W) before the classifier head.
        A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) )
        A__ : Optional[Any] =None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    A__ : int ="regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    A__ : Tuple ="single_label_classification"
                else:
                    A__ : Optional[int] ="multi_label_classification"
            if self.config.problem_type == "regression":
                A__ : Dict =MSELoss()
                if self.num_labels == 1:
                    A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
            elif self.config.problem_type == "single_label_classification":
                A__ : Tuple =CrossEntropyLoss()
                A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                A__ : List[Any] =BCEWithLogitsLoss()
                A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
        if not return_dict:
            A__ : Optional[int] =(logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
| 656 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Tuple = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 | """simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    '''Fast tests for IFInpaintingSuperResolutionPipeline.
    NOTE(review): the mixin base classes are obfuscated to ``_UpperCamelCase``
    (upstream: PipelineTesterMixin, IFPipelineTesterMixin), the four class
    attributes below are all bound to ``__magic_name__`` (upstream:
    pipeline_class, params, batch_params, required_optional_params), and every
    local result is bound to a dead ``A__`` — restore upstream names.'''
    __magic_name__ : int = IFInpaintingSuperResolutionPipeline
    __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Reuse the shared super-resolution dummy components from the mixin.
        return self._get_superresolution_dummy_components()
    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ):
        # Build deterministic dummy pipeline inputs for (device, seed).
        if str(UpperCamelCase__ ).startswith("mps" ):
            A__ : Any =torch.manual_seed(UpperCamelCase__ )
        else:
            A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : List[str] ={
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _UpperCAmelCase ( self : Dict ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def _UpperCAmelCase ( self : int ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def _UpperCAmelCase ( self : Tuple ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )
    def _UpperCAmelCase ( self : str ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def _UpperCAmelCase ( self : Dict ):
        self._test_save_load_local()
    def _UpperCAmelCase ( self : Optional[int] ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 656 | 0 |
"""Gnome sort: simple in-place O(n^2) comparison sort, with a CLI driver."""


def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place with gnome sort and return it (ascending).

    Walks the list, swapping adjacent out-of-order elements and stepping
    back after each swap. Stable; O(n^2) worst case, O(n) on sorted input.
    """
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the out-of-order neighbours and step back one position.
            # (The obfuscated original assigned both swap targets to the same
            # name, discarding one value and corrupting the list.)
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


# Backward-compatible alias for the previous (obfuscated) public name.
snake_case_ = gnome_sort

if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
| 22 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A : Any = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 0 |
import datasets
from .evaluate import evaluate
snake_case__ : int = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
snake_case__ : Union[str, Any] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
snake_case__ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
    """CUAD metric wrapper: reshapes predictions/references into the official
    CUAD format and delegates scoring to the bundled ``evaluate`` script.
    NOTE(review): both methods below are named ``_UpperCAmelCase`` (upstream:
    ``_info`` and ``_compute``, so the second shadows the first here) and
    ``_compute``'s two parameters share one obfuscated name while the body
    reads ``predictions`` / ``references`` — restore upstream names."""
    def _UpperCAmelCase ( self ) -> Optional[int]:
        # Declare the metric's input schema, codebase and reference URLs.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': {
                        'id': datasets.Value('string' ),
                        'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
                    },
                    'references': {
                        'id': datasets.Value('string' ),
                        'answers': datasets.features.Sequence(
                            {
                                'text': datasets.Value('string' ),
                                'answer_start': datasets.Value('int32' ),
                            } ),
                    },
                } ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
        # Re-key predictions by id and wrap references in the nested
        # SQuAD-style structure the official CUAD evaluate() script expects.
        UpperCamelCase_ = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        UpperCamelCase_ = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        UpperCamelCase_ = evaluate(dataset=_UpperCAmelCase , predictions=_UpperCAmelCase )
        return score
| 23 | """simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step ``scheduler`` ``num_steps`` times and return the first learning
    rate observed before each step.

    (Restores the upstream name — the scheduler test class below calls
    ``unwrap_schedule`` — and rebinds the duplicated obfuscated parameter
    names and the dead ``A__`` local that the body read as ``lrs``.)
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like ``unwrap_schedule`` but, halfway through, round-trips the
    scheduler state through ``torch.save``/``torch.load`` to verify that
    save/reload does not perturb the learning-rate sequence.

    (Restores the upstream name called by the test class below; the
    obfuscated original shadowed the previous helper under ``lowercase`` and
    bound every result to dead ``A__`` locals.)
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Optimizer smoke tests: AdamW and Adafactor should drive a 3-element
    parameter vector close to its least-squares target.
    NOTE(review): the helper and both tests are all named ``_UpperCAmelCase``
    (upstream: assertListAlmostEqual, test_adam_w, test_adafactor), so later
    definitions shadow earlier ones, and the ``A__`` targets were originally
    named locals (w, target, criterion, optimizer, loss) — restore upstream.'''
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
        # Element-wise almost-equal assertion over two equal-length lists.
        self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
        for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ )
    def _UpperCAmelCase ( self : Tuple ):
        # AdamW: 100 steps of MSE descent should converge to the target.
        A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
        A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] )
        A__ : Any =nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            A__ : Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    def _UpperCAmelCase ( self : Dict ):
        # Adafactor: 1000 steps of MSE descent should converge to the target.
        A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
        A__ : Dict =torch.tensor([0.4, 0.2, -0.5] )
        A__ : Optional[int] =nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__ : int =Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , )
        for _ in range(1000 ):
            A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Scheduler tests: each LR schedule is checked against a table of
    expected per-step learning rates, then re-checked after a save/reload
    round-trip (via unwrap_and_save_reload_schedule).
    NOTE(review): the class attributes are all bound to ``__magic_name__``
    (upstream: m, optimizer, num_steps) and both methods to
    ``_UpperCAmelCase`` (upstream: assertListAlmostEqual, test_schedulers) —
    restore upstream names; several loop results also land in dead ``A__``
    locals (scheduler, lrs_1, expected_learning_rates).'''
    __magic_name__ : Optional[int] = nn.Linear(50 , 50) if is_torch_available() else None
    __magic_name__ : Any = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None
    __magic_name__ : Union[str, Any] = 10
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ):
        # Element-wise almost-equal assertion with an optional failure message.
        self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
        for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ )
    def _UpperCAmelCase ( self : Optional[Any] ):
        A__ : Union[str, Any] ={"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        A__ : Union[str, Any] ={
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            A__ , A__ : Any =data
            A__ : Union[str, Any] =scheduler_func(self.optimizer , **UpperCamelCase__ )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            A__ : int =unwrap_schedule(UpperCamelCase__ , self.num_steps )
            self.assertListAlmostEqual(
                UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
            A__ : List[str] =scheduler_func(self.optimizer , **UpperCamelCase__ )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ ) # wrap to test picklability of the schedule
            A__ : Tuple =unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps )
            self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : str ):
A__ : int =fn
def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ):
return self.fn(*UpperCamelCase__ , **UpperCamelCase__ )
@classmethod
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict ):
A__ : str =list(map(self , scheduler.lr_lambdas ) )
| 656 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
    """Fast CPU tests for DiTPipeline using tiny dummy components.

    NOTE(review): obfuscation damage — the base name `__lowerCAmelCase` is undefined
    (presumably PipelineTesterMixin), all methods share one name (only the last
    binding survives), and results are bound to `__snake_case` while later lines
    read the original names (`transformer`, `pipe`, `image`, ...). Flagging, not fixing.
    """
    # Pipeline under test and its parameter sets (consumed by the tester mixin).
    __lowercase : Dict = DiTPipeline
    __lowercase : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    __lowercase : Optional[int] = PipelineTesterMixin.required_optional_params - {
        '''latents''',
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    __lowercase : str = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    __lowercase : Any = False

    def lowerCAmelCase ( self ) -> List[Any]:
        """Build tiny transformer/vae/scheduler components for a fast test pipeline."""
        torch.manual_seed(0 )
        __snake_case = TransformeraDModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__SCREAMING_SNAKE_CASE , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=__SCREAMING_SNAKE_CASE , )
        __snake_case = AutoencoderKL()
        __snake_case = DDIMScheduler()
        # NOTE(review): `transformer`/`vae`/`scheduler`/`components` are never bound above.
        __snake_case = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components

    def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
        """Deterministic call kwargs for the pipeline (device-aware RNG seeding)."""
        if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
            # mps does not support device-bound generators; fall back to the global RNG.
            __snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
        else:
            __snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
        __snake_case = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowerCAmelCase ( self ) -> Tuple:
        """Run 2 inference steps on CPU and compare a 3x3 corner slice to a golden value."""
        __snake_case = '''cpu'''
        __snake_case = self.get_dummy_components()
        __snake_case = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
        pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        __snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
        __snake_case = pipe(**__SCREAMING_SNAKE_CASE ).images
        __snake_case = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        __snake_case = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
        __snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 )

    def lowerCAmelCase ( self ) -> Any:
        """Batched single-image inference must match unbatched output within 1e-3."""
        self._test_inference_batch_single_identical(relax_max_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def lowerCAmelCase ( self ) -> List[str]:
        """xFormers attention path must agree with the default attention within 1e-3."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase ( unittest.TestCase):
    """Slow integration tests against the real facebook/DiT-XL-2 checkpoints (CUDA only).

    NOTE(review): same obfuscation damage as the fast tests — results bound to
    `__snake_case` while later lines read `pipe`/`generator`/`words`/... and all
    methods share one name. Flagging, not fixing.
    """

    def lowerCAmelCase ( self ) -> int:
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase ( self ) -> Tuple:
        """DiT-XL-2-256: generated images must match stored reference arrays within 1e-2."""
        __snake_case = torch.manual_seed(0 )
        __snake_case = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
        pipe.to('''cuda''' )
        __snake_case = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        __snake_case = pipe.get_label_ids(__SCREAMING_SNAKE_CASE )
        __snake_case = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='''np''' ).images
        for word, image in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            __snake_case = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-2

    def lowerCAmelCase ( self ) -> Union[str, Any]:
        """DiT-XL-2-512 with the DPMSolver multistep scheduler; looser 1e-1 tolerance."""
        __snake_case = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
        __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('''cuda''' )
        __snake_case = ['''vase''', '''umbrella''']
        __snake_case = pipe.get_label_ids(__SCREAMING_SNAKE_CASE )
        __snake_case = torch.manual_seed(0 )
        __snake_case = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='''np''' ).images
        for word, image in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            __snake_case = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                F'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 24 | """simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger("transformers.models.speecht5")
__A : Optional[Any] = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A : Optional[int] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A : List[str] = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A : List[Any] = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A : Union[str, Any] = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A : Any = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A : Union[str, Any] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A : Optional[int] = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A : Optional[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Optional[int] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : int = []
__A : int = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A : Optional[Any] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A : Tuple = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A : Union[str, Any] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `hf_pointer` along the dotted `key` and copy `value` into the
    parameter/buffer selected by `weight_type` (or into the pointer itself
    when `weight_type` is None).

    `full_name` is only used for error/log messages.
    Raises ValueError when the destination shape does not match `value`.
    (Restores the def name and distinct parameter names used by the call
    sites — the obfuscated version had duplicate parameter names, which is
    a SyntaxError, and assigned every write to a throwaway local.)
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Resolve the destination shape first so mismatches fail loudly.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    """Return True when checkpoint key `name` matches any pattern in `ignore_keys`.

    Patterns: a trailing ".*" means prefix match, an embedded ".*." means
    "prefix ... suffix" containment, anything else is plain substring match.
    (Restores the def name used at the call site; the obfuscated version had
    duplicate parameter names, which is a SyntaxError.)
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every tensor of a fairseq SpeechT5 state dict into `hf_model`.

    `task` selects the key mapping and ignore list ("s2t", "t2s" or "s2s").
    Unmatched keys are collected and reported via the module logger.
    (Restores the def name used at the call site and the locals that the
    obfuscated version discarded into `A__`.)
    """
    unused_weights = []

    # Pick the feature encoder, key mapping and ignore list for this task.
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None  # t2s has no speech feature encoder
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''' )
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched key in the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-layer tensor into the HF feature extractor.

    `full_name` looks like "...conv_layers.<layer>.<type>...."; type_id 0 is the
    conv itself, type_id 2 a layer norm (or group norm on layer 0).
    Keys that match neither are appended to `unused_weights`.
    (Restores the def name used at the call site and the `.data` writes the
    obfuscated version discarded into `A__`.)
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # Fixed: the log previously said "weight" for the bias branch.
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """Convert a fairseq SpeechT5 checkpoint to the HF format and save it.

    task: "s2t", "t2s" or "s2s" — selects the model class and key mapping.
    repo_id: when set, also pushes processor and model to the Hub.
    (Restores the def name used by the __main__ block and the config/model
    assignments the obfuscated version discarded into `A__`.)
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        # NOTE(review): 1876/600 reconstructed from the obfuscated literals —
        # presumably max_speech_positions / max_text_positions; confirm upstream.
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(F'''Unknown task name: {task}''' )

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    # NOTE(review): `tokenizer` is only defined when vocab_path is given — a
    # missing vocab_path raises NameError here (latent bug kept from the original).
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the checkpoint conversion.
    # NOTE(review): obfuscation artifact — the parser is bound to `__A` but read as
    # `parser`, and parse_args() is bound to `__A` but read as `args`; as written
    # this block raises NameError. Flagging, not fixing here.
    __A : Dict = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    __A : str = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 656 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Mapping of submodule name -> list of public symbols; consumed by _LazyModule
# below. The obfuscated version bound this (and the symbol list) to a throwaway
# name `a_`, leaving `_import_structure` undefined at module load.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # SentencePiece missing: expose no tokenizer; imports fail lazily with a clear error.
    pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']

if TYPE_CHECKING:
    # Static type checkers get the real import.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
    """GPT-2 based text decoder driven by a projected "prefix" embedding
    (CLIP-caption style): prefix features are linearly encoded/decoded and
    prepended to the token embeddings before the GPT-2 forward pass.

    NOTE(review): obfuscation damage throughout — `__init__` declares every
    parameter as `UpperCamelCase__` (duplicate names are a SyntaxError) while the
    body reads the original names (`prefix_length`, `n_embd`, ...), and many
    `A__ = ...` assignments look like mangled `self.<attr> = ...` writes
    (e.g. self.prefix_length, self.encode_prefix, self.decode_prefix,
    self.transformer). Flagging, not fixing here.
    """
    # NOTE(review): presumably checkpoint-key regexes to ignore on load — confirm
    # against the ModelMixin base class contract.
    __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]

    @register_to_config
    def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
        super().__init__()
        A__ : Dict =prefix_length
        # A hidden projection is mandatory whenever prefix and embedding widths differ.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )
        A__ : Optional[int] =prefix_inner_dim
        A__ : Optional[int] =prefix_hidden_dim
        # Optional linear adapters into/out of the hidden prefix space (identity when unused).
        A__ : Optional[int] =(
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : Optional[int] =(
            nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : str =GPTaConfig(
            vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ , activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , )
        A__ : Any =GPTaLMHeadModel(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
        """Forward pass: embed tokens, project the prefix, concatenate, run GPT-2.

        When labels are given, dummy tokens pad the label sequence to cover the
        prefix positions.
        """
        A__ : int =self.transformer.transformer.wte(UpperCamelCase__ )
        A__ : Tuple =self.encode_prefix(UpperCamelCase__ )
        A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ )
        A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ):
        """Return a (batch, prefix_length) tensor of zero token ids used as label padding."""
        return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
        """Project raw prefix features into the hidden prefix space."""
        return self.encode_prefix(UpperCamelCase__ )

    @torch.no_grad()
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
        """Decode each feature in the batch independently via beam search and stack results."""
        A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 )
        A__ : List[str] =[]
        A__ : Dict =[]
        for feature in features:
            A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Optional[Any] =self.generate_beam(
                input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : Optional[Any] =torch.stack(UpperCamelCase__ )
        A__ : Optional[int] =torch.stack(UpperCamelCase__ )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ):
        """Beam search over GPT-2 logits starting from prefix embeddings.

        Tracks per-beam cumulative log-probs normalized by sequence length and a
        per-beam stopped flag; returns beams sorted by score (best first) plus
        their effective lengths.
        """
        A__ : str =eos_token_id
        A__ : Optional[Any] =None
        A__ : int =None
        # seq_lengths / is_stopped are per-beam bookkeeping tensors.
        A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int )
        A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Union[str, Any] =input_embeds
        else:
            A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ )
        for i in range(UpperCamelCase__ ):
            A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ )
            A__ : Tuple =outputs.logits
            # Temperature-scaled log-softmax of the last position.
            A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Optional[Any] =logits.softmax(-1 ).log()
            if scores is None:
                # First step: fan the single sequence out into beam_size beams.
                A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 )
                A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] )
                A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : str =next_tokens
                else:
                    A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] )
                    A__ : str =torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Subsequent steps: keep stopped beams frozen, re-rank candidates by
                # length-normalized cumulative score, then re-gather beam state.
                A__ : Union[str, Any] =-float(np.inf )
                A__ : Dict =0
                A__ : Optional[Any] =scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Optional[Any] =scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 )
                A__ : Tuple =next_tokens // scores_sum.shape[1]
                A__ : List[Any] =seq_lengths[next_tokens_source]
                A__ : int =next_tokens % scores_sum.shape[1]
                A__ : str =next_tokens.unsqueeze(1 )
                A__ : List[Any] =tokens[next_tokens_source]
                A__ : int =torch.cat((tokens, next_tokens) , dim=1 )
                A__ : List[str] =generated[next_tokens_source]
                A__ : Optional[Any] =scores_sum_average * seq_lengths
                A__ : Optional[int] =is_stopped[next_tokens_source]
            # Append the chosen token's embedding and update stop flags.
            A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : str =torch.cat((generated, next_token_embed) , dim=1 )
            A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze()
            if is_stopped.all():
                break
        A__ : Optional[int] =scores / seq_lengths
        A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ )
        # tokens tensors are already padded to max_seq_length
        A__ : int =[tokens[i] for i in order]
        A__ : Any =torch.stack(UpperCamelCase__ , dim=0 )
        A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 656 | 0 |
'''simple docstring'''
def _a ( min_val = 10 , max_val = 1000 , option = True ) -> int:
    """Return `min_val` when `option` is truthy, otherwise `max_val`.

    (The obfuscated version declared all three parameters with the same name,
    which is a SyntaxError, and type-checked the parameter against itself;
    restored distinct names and concrete type checks.)
    """
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("""Invalid value for min_val or max_val (min_value < max_value)""" )
    return min_val if option else max_val


# Non-underscored alias so external/test code can reach this helper.
temp_input_value = _a
def get_avg(number_a, number_b) -> int:
    """Return the midpoint of two numbers, truncated toward zero.

    (Restores the name used by the caller in guess_the_number; the obfuscated
    version declared both parameters with the same name — a SyntaxError.)
    """
    return int((number_a + number_b) / 2)
def guess_the_number(lower, higher, to_guess) -> None:
    """Binary-search for `to_guess` inside (lower, higher), printing each probe.

    Raises ValueError for an inverted range or a target outside the open range.
    (Restores the name used by main(); the obfuscated version declared all
    three parameters with the same name — a SyntaxError.)
    """
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("""argument value for lower and higher must be(lower > higher)""" )
    if not lower < to_guess < higher:
        raise ValueError(
            """guess value must be within the range of lower and higher value""" )

    def answer(number) -> str:
        """Compare one probe against the target: 'high', 'low' or 'same'."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("""started...""" )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        # Tighten whichever bound the probe fell short of.
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(F'''guess the number : {last_numbers[-1]}''' )
    print(F'''details : {last_numbers!s}''' )
def main() -> None:
    """Interactively read the range and target, then run the guessing search.

    (Restores the name the __main__ guard calls; the obfuscated version named
    this `_a`, leaving `main` undefined.)
    """
    lower = int(input("""Enter lower value : """ ).strip() )
    higher = int(input("""Enter high value : """ ).strip() )
    guess = int(input("""Enter value to guess : """ ).strip() )
    guess_the_number(lower , higher , guess )


if __name__ == "__main__":
    main()
| 26 | """simple docstring"""
import os
def solution(path=None):
    """Return the maximum top-to-bottom path sum through a number triangle
    (Project Euler 18/67).

    path: optional triangle file; defaults to "triangle.txt" next to this
    script. Each line holds the space-separated numbers of one row.
    (Fixes the obfuscated version, which used an undefined name instead of
    `__file__` and a def name the __main__ guard never called; the `path`
    parameter is a backward-compatible generalization.)
    """
    if path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(script_dir, "triangle.txt")

    with open(path) as triangle_file:
        lines = triangle_file.readlines()

    # Parse each line into a row of ints.
    a = [[int(number) for number in line.strip().split(" ")] for line in lines]

    # Dynamic programming: fold the best partial sums downward row by row.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # No cell above-right for the last element of a row; none above-left for the first.
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Dict = logging.get_logger(__name__)
def __lowerCAmelCase( mobilevit_name ) -> Dict:
    """Build a MobileViT config for the given checkpoint name.

    Selects architecture sizes by variant (s/xs/xxs), segmentation vs.
    classification head by the "deeplabv3_" prefix, and downloads the matching
    label file from the Hub.
    (Restores the config attribute writes the obfuscated version discarded
    into `_A`, fixes `int(k)` in the label-id conversion, and closes the
    downloaded label file deterministically.)
    """
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        # NOTE(review): 0.05 / 2.0 reconstructed as dropout / expand ratio — confirm upstream.
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith('deeplabv3_'):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = 'pascal-voc-id2label.json'
    else:
        config.num_labels = 1_000
        filename = 'imagenet-1k-id2label.json'

    repo_id = 'huggingface/label-files'
    # Use a context manager so the downloaded label file is closed (the original leaked the handle).
    with open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r') as label_file:
        idalabel = json.load(label_file)
    idalabel = {int(k): v for k, v in idalabel.items()}
    # NOTE(review): attribute names follow this file's digit->'a' dialect;
    # upstream MobileViTConfig calls these id2label / label2id — confirm.
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def __lowerCAmelCase(name, base_model=False):
    """Translate one original MobileViT state-dict key into HF Transformers naming.

    Bugs fixed:
      * both parameters were named ``_SCREAMING_SNAKE_CASE`` — a SyntaxError —
        while the body read ``name`` and ``base_model``; the real names are
        restored (positional signature unchanged: second arg defaults False);
      * the ``-> Tuple`` annotation referenced an unimported name and was
        wrong (the function returns a ``str``).

    Args:
        name: original checkpoint key.
        base_model: when True, do not prefix keys with ``"mobilevit."``.

    Returns:
        str: the renamed key. The replacement order below is significant —
        later substring tests rely on earlier replacements having run.
    """
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace('conv_1.', 'conv_stem.')
    if ".block." in name:
        name = name.replace('.block.', '.')
    if "exp_1x1" in name:
        name = name.replace('exp_1x1', 'expand_1x1')
    if "red_1x1" in name:
        name = name.replace('red_1x1', 'reduce_1x1')
    if ".local_rep.conv_3x3." in name:
        name = name.replace('.local_rep.conv_3x3.', '.conv_kxk.')
    if ".local_rep.conv_1x1." in name:
        name = name.replace('.local_rep.conv_1x1.', '.conv_1x1.')
    if ".norm." in name:
        name = name.replace('.norm.', '.normalization.')
    if ".conv." in name:
        name = name.replace('.conv.', '.convolution.')
    if ".conv_proj." in name:
        name = name.replace('.conv_proj.', '.conv_projection.')
    # Stages 0-1 keep a nested `.layer.` level; stages 2-5 are flattened.
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
    if "expand_1x1" in name:
        name = name.replace('expand_1x1', 'downsampling_layer.expand_1x1')
    if "conv_3x3" in name:
        name = name.replace('conv_3x3', 'downsampling_layer.conv_3x3')
    if "reduce_1x1" in name:
        name = name.replace('reduce_1x1', 'downsampling_layer.reduce_1x1')
    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", '.layernorm.weight')
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", '.layernorm.bias')
    if ".global_rep." in name:
        name = name.replace('.global_rep.', '.transformer.')
    if ".pre_norm_mha.0." in name:
        name = name.replace('.pre_norm_mha.0.', '.layernorm_before.')
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('.pre_norm_mha.1.out_proj.', '.attention.output.dense.')
    if ".pre_norm_ffn.0." in name:
        name = name.replace('.pre_norm_ffn.0.', '.layernorm_after.')
    if ".pre_norm_ffn.1." in name:
        name = name.replace('.pre_norm_ffn.1.', '.intermediate.dense.')
    if ".pre_norm_ffn.4." in name:
        name = name.replace('.pre_norm_ffn.4.', '.output.dense.')
    if ".transformer." in name:
        name = name.replace('.transformer.', '.transformer.layer.')
    if ".aspp_layer." in name:
        name = name.replace('.aspp_layer.', '.')
    if ".aspp_pool." in name:
        name = name.replace('.aspp_pool.', '.')
    if "seg_head." in name:
        name = name.replace('seg_head.', 'segmentation_head.')
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('segmentation_head.classifier.classifier.', 'segmentation_head.classifier.')
    if "classifier.fc." in name:
        name = name.replace('classifier.fc.', 'classifier.')
    elif (not base_model) and ("segmentation_head." not in name):
        name = 'mobilevit.' + name
    return name
# NOTE(review): automated renaming broke this function beyond safe local repair:
#   * all three parameters are named `_SCREAMING_SNAKE_CASE` (a SyntaxError);
#     the body reads `orig_state_dict`, `model` and `base_model`, so those were
#     presumably the original parameter names -- confirm against upstream.
#   * every `_A = ...` line below presumably was an assignment to a local
#     (`val`, `key_split`, `layer_num`, `dim`, `prefix`) or to a renamed
#     state-dict entry; the left-hand targets were destroyed, so as written the
#     split query/key/value tensors are computed and silently discarded.
#   * the final `else` branch presumably stored `val` under a renamed key via
#     the rename helper defined above -- that call site is lost.
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
    """Convert an original MobileViT state dict to HF naming, splitting fused
    qkv projections into separate query/key/value tensors (currently broken --
    see the review notes above)."""
    if base_model:
        _A = ''
    else:
        _A = 'mobilevit.'
    for key in orig_state_dict.copy().keys():
        _A = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
        if key[:8] == "encoder.":
            _A = key[8:]
        if "qkv" in key:
            _A = key.split('.' )
            _A = int(key_split[0][6:] ) - 1
            _A = int(key_split[3] )
            _A = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            _A = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            _A = (
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            # First `dim` rows are the query, next `dim` the key, last `dim` the value.
            if "weight" in key:
                _A = val[:dim, :]
                _A = val[dim : dim * 2, :]
                _A = val[-dim:, :]
            else:
                _A = val[:dim]
                _A = val[dim : dim * 2]
                _A = val[-dim:]
        else:
            _A = val
    return orig_state_dict
def __lowerCAmelCase():
    """Download the standard COCO test image (two cats) used to sanity-check
    the converted model.

    Bugs fixed: the body referenced the undefined name ``_SCREAMING_SNAKE_CASE``
    for both the URL and the ``stream`` flag (the function takes no arguments),
    and the ``-> Tuple`` annotation referenced an unimported name while the
    function actually returns a PIL image.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True lets PIL read directly from the HTTP response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
# NOTE(review): not safely repairable in isolation:
#   * all four parameters share the name `_SCREAMING_SNAKE_CASE` (SyntaxError);
#     the body reads `mobilevit_name`, and the prints mention
#     `pytorch_dump_folder_path`, so those were presumably the original names.
#   * it calls `get_mobilevit_config` and `convert_state_dict`, names that do
#     not exist in this file after mangling (both helpers above are now named
#     `__lowerCAmelCase`) -- restore the helper names from upstream first.
#   * the `_A = ...` statements presumably bound `config`, `state_dict`,
#     `model`, `image_processor`, `outputs`, `logits`, `expected_logits`,
#     `model_mapping` -- the targets were destroyed.
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str:
    """Convert an original MobileViT checkpoint to the HF format, verify its
    logits on a fixed test image, save it, and optionally push to the hub
    (currently broken -- see the review notes above)."""
    _A = get_mobilevit_config(_SCREAMING_SNAKE_CASE )
    # load original state_dict
    _A = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
    # load 🤗 model
    if mobilevit_name.startswith('deeplabv3_' ):
        _A = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval()
    else:
        _A = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
    _A = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    model.load_state_dict(_SCREAMING_SNAKE_CASE )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    _A = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    _A = image_processor(images=prepare_img() , return_tensors='pt' )
    _A = model(**_SCREAMING_SNAKE_CASE )
    _A = outputs.logits
    # Golden logit slices recorded from the original checkpoints.
    if mobilevit_name.startswith('deeplabv3_' ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            _A = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            _A = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            _A = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
    else:
        assert logits.shape == (1, 1_000)
        if mobilevit_name == "mobilevit_s":
            _A = torch.tensor([-0.9866, 0.2392, -1.1241] )
        elif mobilevit_name == "mobilevit_xs":
            _A = torch.tensor([-2.4761, -0.9399, -1.9587] )
        elif mobilevit_name == "mobilevit_xxs":
            _A = torch.tensor([-1.9364, -1.2327, -0.4653] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
    Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(_SCREAMING_SNAKE_CASE )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
    if push_to_hub:
        _A = {
            'mobilevit_s': 'mobilevit-small',
            'mobilevit_xs': 'mobilevit-x-small',
            'mobilevit_xxs': 'mobilevit-xx-small',
            'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
            'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
            'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
        }
        print('Pushing to the hub...' )
        _A = model_mapping[mobilevit_name]
        image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization='apple' )
        model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='apple' )
if __name__ == "__main__":
    __A : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    __A : str = parser.parse_args()
    # Bug fix: `convert_movilevit_checkpoint` is not defined in this module --
    # after mangling, the conversion entry point is the last function bound to
    # the module-level name `__lowerCAmelCase`, so call that instead.
    __lowerCAmelCase(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 27 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
# Module-level logger for the GLPN conversion script below.
logging.set_verbosity_info()
__A : int = logging.get_logger(__name__)
def lowercase(state_dict):
    """Return a new ``OrderedDict`` with every original GLPN state-dict key
    renamed to the HF Transformers convention.

    Bugs fixed: the parameter was named ``UpperCamelCase`` while the body read
    ``state_dict``, every assignment target (``key``, ``idx``, the final store
    into the result dict) had been destroyed by mangling, and the f-string
    index conversions referenced the undefined ``UpperCamelCase``. Targets are
    restored so each replacement feeds the next; values are copied unchanged.

    Args:
        state_dict: mapping of original checkpoint keys to tensors/values.

    Returns:
        collections.OrderedDict: renamed key -> original value.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def lowercase(state_dict, config):
    """Split each fused key/value ("kv") projection in ``state_dict`` into
    separate key and value tensors, in place.

    Bugs fixed: both parameters shared the name ``UpperCamelCase`` (a
    SyntaxError) while the body read ``config`` and ``state_dict``, and the
    assignment targets that stored the split tensors back into the state dict
    had been destroyed by mangling — the splits were computed and discarded.

    Args:
        state_dict: HF-named GLPN state dict (mutated in place).
        config: object exposing ``num_encoder_blocks``, ``depths`` and
            ``hidden_sizes`` (one entry per encoder block).

    Returns:
        None — ``state_dict`` is modified in place.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            hidden = config.hidden_sizes[i]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[:hidden, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[:hidden]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[hidden:, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[hidden:]
def lowercase():
    """Fetch the standard COCO test image (two cats) used to sanity-check the
    converted GLPN model.

    Bug fixed: the body passed the undefined name ``UpperCamelCase`` as both
    the URL and the ``stream`` flag (the function takes no arguments).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the HTTP response body.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
# NOTE(review): not safely repairable in isolation:
#   * all four parameters share the name `UpperCamelCase` (SyntaxError); the
#     body reads `model_name` and `push_to_hub`, and a checkpoint path and
#     dump folder are clearly threaded through -- confirm names upstream.
#   * it calls `rename_keys`, `read_in_k_v` and `prepare_img`, names that do
#     not exist in this file after mangling (the helpers above are all named
#     `lowercase`) -- restore the helper names from upstream first.
#   * the `A__ = ...` statements presumably bound `config`, `image_processor`,
#     `image`, `pixel_values`, `state_dict`, `model`, `outputs`,
#     `expected_slice`, `expected_shape` -- the targets were destroyed.
def lowercase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : List[str]=False , UpperCamelCase : str=None ):
    """Convert an original GLPN checkpoint to the HF format, verify its depth
    predictions on a fixed test image, and optionally push to the hub
    (currently broken -- see the review notes above)."""
    A__ : List[str] =GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    A__ : str =GLPNImageProcessor()
    # prepare image
    A__ : Any =prepare_img()
    A__ : Optional[int] =image_processor(images=UpperCamelCase , return_tensors="pt" ).pixel_values
    logger.info("Converting model..." )
    # load original state dict
    A__ : int =torch.load(UpperCamelCase , map_location=torch.device("cpu" ) )
    # rename keys
    A__ : Union[str, Any] =rename_keys(UpperCamelCase )
    # key and value matrices need special treatment
    read_in_k_v(UpperCamelCase , UpperCamelCase )
    # create HuggingFace model and load state dict
    A__ : Optional[int] =GLPNForDepthEstimation(UpperCamelCase )
    model.load_state_dict(UpperCamelCase )
    model.eval()
    # forward pass
    A__ : int =model(UpperCamelCase )
    A__ : Optional[Any] =outputs.predicted_depth
    # verify output
    if model_name is not None:
        # Golden depth slices recorded from the original NYU / KITTI checkpoints.
        if "nyu" in model_name:
            A__ : List[Any] =torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
        elif "kitti" in model_name:
            A__ : Tuple =torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''' )
        A__ : str =torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase , atol=1E-4 )
        print("Looks ok!" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=UpperCamelCase , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=UpperCamelCase , )
if __name__ == "__main__":
    __A : List[str] = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    __A : Any = parser.parse_args()
    # Bug fix: `convert_glpn_checkpoint` is not defined in this module -- after
    # mangling, the conversion entry point is the last function bound to the
    # module-level name `lowercase`, so call that instead.
    lowercase(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 656 | 0 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or auto-nagged.
# Bug fix: the stale-bot function below reads `LABELS_TO_EXEMPT`, which was
# never defined (the constant had been renamed to `UpperCamelCase_`). Bind
# both names to the same list so existing references keep working.
UpperCamelCase_ = LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def lowercase__():
    """Sweep open issues on ``huggingface/transformers`` and close or nag
    stale ones.

    Bugs fixed: the sort key was ``lambda __UpperCamelCase: i.created_at``
    (the lambda parameter and the body disagreed, so every comparison raised
    ``NameError``), ``reverse`` was passed the undefined ``__UpperCamelCase``,
    and ``len(__UpperCamelCase)`` should have measured the ``comments`` list.

    Policy (as implemented below): an issue whose last comment came from the
    bot, untouched for >7 days and >=30 days old, is closed; otherwise an
    issue untouched for >23 days and >=30 days old gets a stale reminder.
    Issues carrying an exempt label are skipped in both cases.

    NOTE(review): requires the ``GITHUB_TOKEN`` environment variable and
    network access to the GitHub API.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state='closed')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')
if __name__ == "__main__":
    # Bug fix: `main` was never defined; the entry point above is
    # (mangled-)named `lowercase__`.
    lowercase__()
| 28 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Any = logging.get_logger(__name__)
# NOTE(review): this second assignment to `__A` clobbers the logger bound on
# the previous line -- the two constants were presumably distinct names
# (logger and pretrained-config map) before automated renaming.
__A : Optional[Any] = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase ( _UpperCamelCase):
    """GPT-Neo model configuration.

    Bugs fixed in this class:
      * every ``__init__`` parameter was named ``UpperCamelCase__`` (a
        SyntaxError) while the body read the real names — the real names are
        restored with the defaults the mangled signature carried;
      * the assignment targets (``self.<attr> = ...``) destroyed by mangling
        are restored;
      * ``self.expand_attention_types_params`` referenced a method that does
        not exist here (the static method below is named ``_UpperCAmelCase``),
        so the call now targets the actual method name.

    NOTE(review): the three ``__magic_name__`` class attributes below shadow
    one another (only the last assignment survives); they were presumably
    distinct names (model type, keys to ignore, attribute map) before
    mangling — left untouched to preserve the visible interface.
    """

    __magic_name__ : Union[str, Any] = """gpt_neo"""
    __magic_name__ : Union[str, Any] = ["""past_key_values"""]
    __magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        """Store hyper-parameters; validates that `attention_types` expands to
        exactly `num_layers` per-layer attention kinds before delegating to the
        base config."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        # Expand [[kinds, repeat], ...] into one attention kind per layer.
        self.attention_layers = self._UpperCAmelCase(attention_types)
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument." )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def _UpperCAmelCase(attention_types):
        """Flatten [[kinds, repeat], ...] into a per-layer list of attention kinds."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def lowercase(input, dimension, size, step):
    """Custom implementation of ``torch.Tensor.unfold``: extract sliding
    windows of length ``size`` with stride ``step`` along ``dimension``.

    Bugs fixed: all four parameters shared the name ``UpperCamelCase`` (a
    SyntaxError), the body mixed undefined placeholder names with the real
    locals, and the assignment targets destroyed by mangling (``shape``,
    ``rank``, ``sizedim``, ``s[dimension]``, ``sliced``) are restored. The
    index list is converted to a tuple before subscripting, since indexing a
    tensor with a plain list of slices is ambiguous in modern PyTorch.

    Returns:
        Tensor with the unfolded windows as the trailing dimension, matching
        ``input.unfold(dimension, size, step)``'s layout.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    # One row of column indices per window start position.
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[tuple(s)]

    # Move the in-window axis to the end, as Tensor.unfold does.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def lowercase(seq_length, window_size):
    """Return the largest divisor of ``seq_length`` strictly below
    ``window_size`` and the resulting number of blocks.

    Bugs fixed: both parameters shared the name ``UpperCamelCase`` (a
    SyntaxError); the lost assignment targets (``candidates``, ``remainders``,
    ``divisors``, ``largest_divisor``) are restored, ``torch.max`` is taken
    over the divisors (not all candidates), and the final division uses
    ``seq_length / largest_divisor``.

    Returns:
        tuple(Tensor, Tensor): (largest divisor, seq_length // divisor).
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class __lowerCAmelCase ( _UpperCamelCase):
    """ONNX export configuration for GPT-Neo (with past-key-values support).

    NOTE(review): automated renaming collapsed every method name in this class
    into `_UpperCAmelCase`, so in the class namespace each later definition
    shadows the earlier ones and only the last (`return 13`) survives.
    Judging from upstream GPTNeoOnnxConfig, the four members were presumably
    the `inputs` property, the `num_attention_heads` property,
    `generate_dummy_inputs`, and the `default_onnx_opset` property -- restore
    those names before use. The `generate_dummy_inputs` signature also has
    every parameter named `UpperCamelCase__` (a SyntaxError), and the
    `A__ = ...` assignment targets were destroyed.
    """

    @property
    def _UpperCAmelCase ( self : List[Any] ):
        # Presumably the `inputs` property: dynamic-axis spec for the exporter.
        A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
            A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"}
        else:
            A__ : Tuple ={0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def _UpperCAmelCase ( self : List[str] ):
        # Presumably the `num_attention_heads` property.
        return self._config.num_heads
    def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
        # Presumably `generate_dummy_inputs`: build tokenizer dummies, then
        # append zero-filled past_key_values and an extended attention mask.
        A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs(
            UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
        # We need to order the input in the way they appears in the forward()
        A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                A__ : Union[str, Any] =seqlen + 2
                A__ : List[Any] =(
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                A__ : Optional[Any] =[
                    (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers )
                ]
                A__ : Optional[Any] =common_inputs["attention_mask"]
                if self.use_past:
                    A__ : Any =ordered_inputs["attention_mask"].dtype
                    A__ : Tuple =torch.cat(
                        [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
        return ordered_inputs
    @property
    def _UpperCAmelCase ( self : List[str] ):
        # Presumably the `default_onnx_opset` property.
        return 13
| 656 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __lowerCamelCase :
    """Helper that builds tiny EsmConfig objects and dummy inputs for the
    TF-ESM model tests below.

    NOTE(review): automated renaming collapsed every `self.<attr> = ...`
    target in `__init__` into the bare local `lowerCamelCase_`, and collapsed
    every tuple-unpack target into the same repeated name, so the original
    attribute/variable names are lost. The method bodies still read the
    original names (`self.batch_size`, `config`, `input_ids`, ...), which are
    now undefined, and several `create_and_check_*` methods have every
    parameter named `UpperCAmelCase` (a SyntaxError). Restore names from the
    upstream `TFEsmModelTester` before use; left byte-identical here.
    """
    def __init__( self , UpperCAmelCase , ):
        # Presumably: parent test case, then the usual tiny-model hyperparameters
        # (batch_size=13, seq_length=7, vocab_size=99, hidden_size=32, ...).
        lowerCamelCase_ = parent
        lowerCamelCase_ = 13
        lowerCamelCase_ = 7
        lowerCamelCase_ = True
        lowerCamelCase_ = True
        lowerCamelCase_ = True
        lowerCamelCase_ = 99
        lowerCamelCase_ = 32
        lowerCamelCase_ = 2
        lowerCamelCase_ = 4
        lowerCamelCase_ = 37
        lowerCamelCase_ = '''gelu'''
        lowerCamelCase_ = 0.1
        lowerCamelCase_ = 0.1
        lowerCamelCase_ = 512
        lowerCamelCase_ = 16
        lowerCamelCase_ = 2
        lowerCamelCase_ = 0.0_2
        lowerCamelCase_ = 3
        lowerCamelCase_ = 4
        lowerCamelCase_ = None
    def UpperCAmelCase__ ( self ):
        # Presumably `prepare_config_and_inputs`.
        lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ = None
        if self.use_input_mask:
            lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ = None
        lowerCamelCase_ = None
        lowerCamelCase_ = None
        if self.use_labels:
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def UpperCAmelCase__ ( self ):
        # Presumably `prepare_config_and_inputs_for_decoder`: adds encoder
        # hidden states and attention mask to the standard inputs.
        (
            (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) ,
        ) = self.prepare_config_and_inputs()
        lowerCamelCase_ = True
        lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
        # Presumably `create_and_check_model` (duplicate params: SyntaxError).
        lowerCamelCase_ = TFEsmModel(config=UpperCAmelCase )
        lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCamelCase_ = model(UpperCAmelCase )
        lowerCamelCase_ = [input_ids, input_mask]
        lowerCamelCase_ = model(UpperCAmelCase )
        lowerCamelCase_ = model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ):
        # Presumably `create_and_check_model_as_decoder` (duplicate params: SyntaxError).
        lowerCamelCase_ = True
        lowerCamelCase_ = TFEsmModel(config=UpperCAmelCase )
        lowerCamelCase_ = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        lowerCamelCase_ = model(UpperCAmelCase )
        lowerCamelCase_ = [input_ids, input_mask]
        lowerCamelCase_ = model(UpperCAmelCase , encoder_hidden_states=UpperCAmelCase )
        # Also check the case where encoder outputs are not passed
        lowerCamelCase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
        # Presumably `create_and_check_for_masked_lm` (duplicate params: SyntaxError).
        lowerCamelCase_ = TFEsmForMaskedLM(config=UpperCAmelCase )
        lowerCamelCase_ = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
        # Presumably `create_and_check_for_token_classification` (duplicate params: SyntaxError).
        lowerCamelCase_ = self.num_labels
        lowerCamelCase_ = TFEsmForTokenClassification(config=UpperCAmelCase )
        lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCamelCase_ = model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def UpperCAmelCase__ ( self ):
        # Presumably `prepare_config_and_inputs_for_common`.
        lowerCamelCase_ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) ,
        ) = config_and_inputs
        lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Model-level test suite for TF-ESM.

    NOTE(review): the two mixin base classes were both renamed to the
    undefined `lowerCAmelCase` (presumably `TFModelTesterMixin` and
    `PipelineTesterMixin`, which are imported above), and all four class
    attributes share the name `a__` so only the last assignment survives --
    they were presumably `all_model_classes`, `pipeline_model_mapping`,
    `test_head_masking` and `test_onnx`. Every test method is also named
    `UpperCAmelCase__`, so later definitions shadow earlier ones. Restore
    names from the upstream test file before use; left byte-identical here.
    """
    a__: Optional[Any] = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    a__: Tuple = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a__: str = False
    a__: Tuple = False
    def UpperCAmelCase__ ( self ):
        # Presumably `setUp`: build the tester and config tester.
        lowerCamelCase_ = TFEsmModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
    def UpperCAmelCase__ ( self ):
        self.config_tester.run_common_tests()
    def UpperCAmelCase__ ( self ):
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )
    def UpperCAmelCase__ ( self ):
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase )
    def UpperCAmelCase__ ( self ):
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
    def UpperCAmelCase__ ( self ):
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
    @slow
    def UpperCAmelCase__ ( self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = TFEsmModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def UpperCAmelCase__ ( self ):
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def UpperCAmelCase__ ( self ):
        pass
    def UpperCAmelCase__ ( self ):
        # Presumably the input/output embeddings test: MaskedLM exposes the
        # output bias as a dict of tf.Variables instead of a layer.
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(UpperCAmelCase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                lowerCamelCase_ = model.get_bias()
                assert isinstance(UpperCAmelCase , UpperCAmelCase )
                for k, v in name.items():
                    assert isinstance(UpperCAmelCase , tf.Variable )
            else:
                lowerCamelCase_ = model.get_output_embeddings()
                assert x is None
                lowerCamelCase_ = model.get_bias()
                assert name is None
assert name is None
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(UpperCAmelCase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , UpperCAmelCase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(UpperCAmelCase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 29 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( _UpperCamelCase):
    """Configuration for a Megatron-BERT style encoder.

    Stores the architecture hyper-parameters as attributes; padding/cache
    options are forwarded to the base configuration class.

    NOTE(review): the original ``__init__`` declared every parameter with
    the same placeholder name (a SyntaxError in Python) and then bound the
    values to throwaway locals; distinct parameter names and real
    ``self`` attribute assignments are restored here, matching the
    defaults that were present in the signature.
    """

    # Model-type identifier string.
    __magic_name__ : Tuple = """megatron-bert"""

    def __init__(
        self,
        vocab_size=29056,                   # token vocabulary size
        hidden_size=1024,                   # encoder hidden dimension
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,             # feed-forward inner dimension
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 656 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
    """Tests for MgpstrProcessor: save/load round-trips, delegation to the
    character tokenizer and ViT image processor, and batch decoding.

    NOTE(review): the original block never assigned the instance attributes
    its tests read (``self.tmpdirname`` ...), named every method ``a__``
    while the bodies call ``self.get_tokenizer()`` etc., used the bare
    parameter placeholder ``_SCREAMING_SNAKE_CASE`` as a value, used the
    nonexistent ``np.uinta`` dtype, and carried trailing extraction garbage
    (``| 30 | \"\"\"...\"\"\"``) on the final assertion — all restored here.
    """

    lowerCAmelCase = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict( self ):
        # presumably provided by a tester mixin — TODO confirm against base class
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp( self ):
        # Materialize a character vocab and an image-processor config in a
        # temp dir that the other tests load from.
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )

    def get_tokenizer( self , **kwargs ):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        # Random PIL image in (H, W, C) layout.
        image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
        return image_input

    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_save_load_pretrained_additional_features( self ):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'test'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'test'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_char_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        decode_strs = [seq.replace(' ' , '' ) for seq in decoded_tok]
        self.assertListEqual(decode_strs , decoded_processor )

    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )

    def test_batch_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        char_input = torch.randn(1 , 27 , 38 )
        bpe_input = torch.randn(1 , 27 , 50_257 )
        wp_input = torch.randn(1 , 27 , 30_522 )
        results = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
from __future__ import annotations
def lowercase ( UpperCamelCase : list[float] ):
    """Check whether side lengths can close into a polygon.

    A closed polygon requires the longest side to be strictly shorter than
    the sum of the remaining sides.  Raises ValueError for fewer than two
    sides or for any non-positive side length.

    The original body read ``nums``/``copy_nums`` that were never bound
    (the values went to throwaway locals) — restored to use the parameter.
    """
    if len(UpperCamelCase ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in UpperCamelCase ):
        raise ValueError("All values must be greater than 0" )
    # Sort a copy so the caller's list is left untouched.
    copy_nums = UpperCamelCase.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 656 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCamelCase_ :
    """A differentiable projective camera: a per-batch origin plus x/y/z
    axis vectors, able to emit one ray (origin, direction) per pixel.

    NOTE(review): the original declared all nine fields as the same
    placeholder name (so the dataclass lacked the attributes its own
    methods read), gave every method one shared name while the bodies
    call ``self.resolution()`` / ``self.get_image_coords()`` /
    ``self.get_camera_rays()``, used the nonexistent ``np.floataa`` dtype,
    repeated a parameter name in the resize method (SyntaxError), and
    referenced an undefined class name — all restored here.
    """

    # Each of the four tensors below is [batch_size x 3].
    origin: torch.Tensor
    x: torch.Tensor
    y: torch.Tensor
    z: torch.Tensor
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int, ...]

    def __post_init__( self ):
        # Validate that the camera tensors agree on batch size and are 2-D
        # with exactly 3 components per row.
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2

    def resolution( self ):
        # (width, height) as a float32 tensor.
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )

    def fov( self ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )

    def get_image_coords( self ):
        """Return integer (x, y) coordinates for every pixel, shape [H*W, 2]."""
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='trunc' ),
            ] , axis=1 , )
        return coords

    @property
    def camera_rays( self ):
        # Rays for every pixel of every (batch, inner-batch) camera.
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays

    def get_camera_rays( self , coords: torch.Tensor ):
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        # Map pixel coordinates into [-1, 1], then scale by tan(fov / 2).
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )

    def resize_image( self , width: int , height: int ):
        """Return a camera with a new image size; aspect ratio must match."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return lowerCamelCase_(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> "lowerCamelCase_":
    """Build a ring of 20 cameras panning around the origin.

    Each camera sits on a circle of radius 4, looks toward the center with
    a slight downward tilt, and renders a square image of side
    ``__UpperCAmelCase`` pixels.

    The original appended the integer size to all four accumulator lists
    (which also shared one name), referenced an undefined class name, and
    carried trailing extraction garbage — restored here.
    """
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        # Viewing direction on the circle, tilted down before normalizing.
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return lowerCamelCase_(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=__UpperCAmelCase , height=__UpperCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import table: maps submodule name -> public names it provides.
# The original assigned this dict (and the modeling list) to throwaway
# names while the bottom of the file read ``_import_structure``
# (NameError), and never installed the lazy module in ``sys.modules``.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: the modeling objects simply are not exported.
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> float:
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def A__ ( principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: float , ) -> float:
    """Return the compound interest earned (not the final balance).

    Raises:
        ValueError: for a non-positive period count, a negative rate,
            or a non-positive principal.

    The original signature repeated one placeholder parameter name
    (SyntaxError); distinct names matching the body are restored.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def A__ ( principal: float , nominal_annual_percentage_rate: float , number_of_years: float , ) -> float:
    """Return APR interest: daily compounding over whole years.

    Raises:
        ValueError: for a non-positive year count, a negative rate,
            or a non-positive principal.

    The original repeated one placeholder parameter name (SyntaxError) and
    called ``compound_interest``, which is shadowed in this module (every
    function shares the name ``A__``), so the daily-compounding formula is
    inlined.
    """
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    # Daily compounding: rate / 365 applied over 365 periods per year.
    daily_rate = nominal_annual_percentage_rate / 3_65
    periods = number_of_years * 3_65
    return principal * ((1 + daily_rate) ** periods - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def lowercase ( UpperCamelCase : int ):
    """Return all primes <= ``UpperCamelCase`` via the sieve of Eratosthenes.

    Raises:
        ValueError: if the input is not a positive integer.

    Restored from a mangled version that read unbound names
    (``num``/``primes``/``p``), stepped the inner marking loop by the
    input instead of the prime, and never wrote the composite flags.
    """
    if UpperCamelCase <= 0:
        raise ValueError("Input must be a positive integer" )
    primes = [True] * (UpperCamelCase + 1)
    p = 2
    while p * p <= UpperCamelCase:
        if primes[p]:
            # Mark every multiple of p starting at p*p as composite.
            for i in range(p * p , UpperCamelCase + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , UpperCamelCase + 1 ) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(lowercase(user_num))
| 656 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    """Pipeline tests for IF inpainting super-resolution.

    NOTE(review): the original declared all four mixin attributes as the
    same name (duplicates overwrite each other) and repeated the parameter
    name in the dummy-input builder (SyntaxError); restored to the
    attribute/method names the pipeline-test mixins read.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        # MPS does not support per-device generators.
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 33 | """simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase):
    """Checks that an accelerate-prepared optimizer survives pickling.

    The original bound every value to a throwaway local and then read
    undefined names (``model.parameters()`` with no ``model``) — restored.
    """

    def _UpperCAmelCase ( self : List[Any] ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            # A prepared optimizer must round-trip through pickle.
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        AcceleratorState._reset_state()
| 656 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class snake_case_ :
    """Circular convolution of two fixed 1-D signals via a circulant matrix.

    Restored from a mangled version whose ``__init__`` never set the
    instance attributes and whose method read local names that were never
    bound; trailing extraction garbage after ``doctest.testmod()`` (which
    produced ``TestResults | 34``, a TypeError) is removed.
    """

    def __init__( self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def UpperCAmelCase__ ( self ) -> list[float]:
        """Return the circular convolution of the two signals, rounded to 2 dp."""
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            # Row i is the second signal rotated right by i positions.
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Without sentencepiece the slow tokenizer is unavailable; the original
    # assigned this fallback to a throwaway name, leaving the name
    # ``BigBirdTokenizer`` (read by the class below) undefined.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# These module-level constants are read by the tokenizer class below; the
# original bound all of them to one placeholder name.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

SPIECE_UNDERLINE = "▁"
class __lowerCAmelCase ( _UpperCamelCase):
    """Fast (Rust-backed) BigBird tokenizer.

    NOTE(review): the original declared all six class attributes as one
    shared name and all four methods as one shared name (later duplicates
    silently overwrite earlier ones), and repeated a parameter name in
    every signature (a SyntaxError).  Attribute and method names are
    restored to the ones the fast-tokenizer base class relies on, and the
    bodies to use the names they actually reference.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []

    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ):
        # Plain-string specials are wrapped as AddedToken; the mask token
        # keeps the space before it (lstrip=True) so it behaves like a word.
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Return [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Copy the sentencepiece model file next to the saved tokenizer."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 656 | 0 |
from __future__ import annotations
def a ( A__ ) -> float:
    """Return the arithmetic mean of the values in ``A__``.

    Raises:
        ValueError: if the sequence is empty.

    The original body read ``nums``, which was never bound (the parameter
    is named ``A__``), so every call raised NameError — fixed to use the
    parameter.
    """
    if not A__:
        raise ValueError('''List is empty''' )
    return sum(A__ ) / len(A__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 35 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = {"vocab_file": "spiece.model"}
__A : List[Any] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A__ : Dict =3
A__ : int =do_lower_case
A__ : str =remove_space
A__ : Optional[Any] =keep_accents
A__ : int =vocab_file
A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
A__ : Union[str, Any] =jieba
A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _UpperCAmelCase ( self : Union[str, Any] ):
return len(self.sp_model )
def _UpperCAmelCase ( self : Optional[int] ):
A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
A__ : Union[str, Any] =self.__dict__.copy()
A__ : Tuple =None
return state
def __setstate__( self : Tuple , UpperCamelCase__ : int ):
A__ : Union[str, Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ : Optional[int] ={}
A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self , inputs ):
    """Normalize raw text before SentencePiece tokenization.

    Collapses whitespace (if ``remove_space``), converts LaTeX-style quotes to
    plain double quotes, optionally strips accents (NFKD + drop combining
    marks) and lower-cases.

    BUG FIX: the original bound every intermediate result to a throwaway local
    while the following lines read ``inputs``/``outputs``, which were
    undefined; the parameter and locals are restored to the names the body uses.
    """
    if self.remove_space:
        outputs = " ".join(inputs.strip().split())
    else:
        outputs = inputs
    outputs = outputs.replace("``", "\"").replace("''", "\"")
    if not self.keep_accents:
        outputs = unicodedata.normalize("NFKD", outputs)
        outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
    if self.do_lower_case:
        outputs = outputs.lower()
    return outputs
def _UpperCAmelCase ( self , text ):
    """Tokenize a string with SentencePiece, re-splitting digit+comma pieces.

    BUG FIX: the original discarded every intermediate into throwaway locals
    while reading the undefined names ``new_pieces``/``cur_pieces``, passed
    the *input text* as ``out_type`` and as the ``replace`` target (instead of
    ``str`` and ``SPIECE_UNDERLINE``), and rebound ``cur_pieces`` where
    ``cur_pieces[0]`` should have been sliced in place.
    """
    text = self.preprocess_text(text)
    pieces = self.sp_model.encode(text, out_type=str)
    new_pieces = []
    for piece in pieces:
        # A piece like "2014," is re-tokenized without its trailing comma so
        # the digits and the comma become separate pieces.
        if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
            # NOTE(review): SPIECE_UNDERLINE is expected to be the module-level
            # "\u2581" sentencepiece marker — confirm it is defined above.
            cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
            if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                if len(cur_pieces[0]) == 1:
                    cur_pieces = cur_pieces[1:]
                else:
                    cur_pieces[0] = cur_pieces[0][1:]
            cur_pieces.append(piece[-1])
            new_pieces.extend(cur_pieces)
        else:
            new_pieces.append(piece)
    return new_pieces
def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
    """Convert a token (str) into its id via the SentencePiece vocabulary."""
    return self.sp_model.PieceToId(UpperCamelCase__)
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
    """Convert an id (int) back into its token via the SentencePiece vocabulary."""
    return self.sp_model.IdToPiece(UpperCamelCase__)
def _UpperCAmelCase ( self , tokens ):
    """Join sentencepiece tokens back into a string, restoring spaces.

    BUG FIX: the original passed the token *list itself* to ``str.replace``
    (a TypeError) instead of the SPIECE_UNDERLINE marker, and returned the
    undefined name ``out_string``.
    """
    # NOTE(review): SPIECE_UNDERLINE should be the module-level "\u2581"
    # sentencepiece marker — confirm it is defined above.
    out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
    return out_string
def _UpperCAmelCase ( self , token_ids_0 , token_ids_1=None ):
    """Add special tokens: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``.

    BUG FIX: the original declared both parameters with the same name
    (a SyntaxError) and used one name for both sequences in the pair branch;
    the two sequences are given distinct names.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return token_ids_0 + sep + cls
    return token_ids_0 + sep + token_ids_1 + sep + cls
def _UpperCAmelCase ( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
    """Return a 0/1 mask marking special-token positions (1 = special).

    Matches the ``X <sep> <cls>`` / ``A <sep> B <sep> <cls>`` layout produced
    by ``build_inputs_with_special_tokens``.

    BUG FIX: the original declared all three parameters with the same name,
    which is not valid Python; the conventional names are restored.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )
    if token_ids_1 is not None:
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
    return ([0] * len(token_ids_0)) + [1, 1]
def _UpperCAmelCase ( self , token_ids_0 , token_ids_1=None ):
    """Build segment ids: 0 for sequence A (+sep), 1 for sequence B (+sep),
    and segment id 2 for the trailing ``<cls>`` token.

    BUG FIX: the original declared both parameters with the same name
    (a SyntaxError); the two sequences are given distinct names.
    """
    sep = [self.sep_token_id]
    cls_segment_id = [2]  # the <cls> token gets its own segment id
    if token_ids_1 is None:
        return len(token_ids_0 + sep) * [0] + cls_segment_id
    return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def _UpperCAmelCase ( self , save_directory , filename_prefix=None ):
    """Save the SentencePiece vocabulary file into ``save_directory``.

    Copies the original model file when it exists on disk, otherwise writes
    the serialized in-memory model.

    BUG FIX: the original declared both parameters with the same name
    (a SyntaxError) and read the undefined names ``out_vocab_file`` /
    ``fi``'s payload after binding them to throwaway locals.
    """
    if not os.path.isdir(save_directory):
        logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
    )
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        with open(out_vocab_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (out_vocab_file,)
def _UpperCAmelCase ( self , *args , **kwargs ):
    """Decode ids to text, then map the CPM whitespace markers back:
    drop padding spaces, U+2582 -> " ", U+2583 -> "\\n".

    BUG FIX: ``*args`` and ``**kwargs`` shared one parameter name in the
    original, which is not valid Python.
    """
    text = super()._decode(*args, **kwargs)
    text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
    return text
| 656 | 0 |
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1'''  # default AWS region used by the SageMaker test jobs
@dataclass
class _A :
    """Shared configuration for the SageMaker integration tests.

    BUG FIX: every class attribute was named ``__lowerCamelCase`` and every
    property ``snake_case_``, so later definitions shadowed earlier ones and
    the bodies read undefined names (``self.framework``, ``hyperparameters``).
    Attribute names are restored to the ones the bodies reference; the
    property names below are conventional — confirm against callers.
    """

    # The only dataclass field: "pytorch" or "tensorflow" (selects metrics/image).
    framework: str
    # Plain class attributes (unannotated, so not dataclass fields).
    role = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    hyperparameters = {
        '''task_name''': '''mnli''',
        '''per_device_train_batch_size''': 16,
        '''per_device_eval_batch_size''': 16,
        '''do_train''': True,
        '''do_eval''': True,
        '''do_predict''': True,
        '''output_dir''': '''/opt/ml/model''',
        '''overwrite_output_dir''': True,
        '''max_steps''': 500,
        '''save_steps''': 5500,
    }
    # Distributed runs reuse the base hyperparameters with more steps.
    distributed_hyperparameters = {**hyperparameters, '''max_steps''': 1000}

    @property
    def metric_definitions(self):
        """Regexes SageMaker uses to scrape metrics from the training logs."""
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        """Job-name prefix shown in the SageMaker console."""
        return F"""{self.framework}-transfromers-test"""

    @property
    def test_path(self):
        """Local path to the framework-specific training scripts."""
        return F"""./tests/sagemaker/scripts/{self.framework}"""

    @property
    def image_uri(self):
        """ECR image for the selected framework's HuggingFace DLC."""
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( request ) -> List[str]:
    """Class-scoped fixture building the test environment for a test class.

    BUG FIX: the original named the parameter differently from the ``request``
    object its body reads, called the undefined ``SageMakerTestEnvironment``
    (the dataclass in this file is ``_A``), and bound the environment to a
    discarded local. It is attached to the requesting class so tests can use
    ``self.env`` — confirm the attribute name against the test classes.
    """
    request.cls.env = _A(framework=request.cls.framework)
| 36 | """simple docstring"""
def lowercase ( n : int , array : list[int] , target : int ):
    """Count ordered sequences of items from ``array`` that sum to ``target``
    (combination-sum IV), by plain recursion.

    BUG FIX: all three parameters shared one name in the original, which is a
    SyntaxError; names are restored to those the body references. ``n`` is
    kept for signature parity with the other variants (unused here).
    """
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0  # overshot: this path contributes nothing
        if target == 0:
            return 1  # exact hit: one valid sequence
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def lowercase ( n : int , array : list[int] , target : int ):
    """Count ordered sequences of items from ``array`` that sum to ``target``,
    using top-down recursion with a memo table.

    BUG FIX: the original declared all parameters with one shared name
    (SyntaxError) and discarded the memoized answer into a throwaway local
    instead of storing it in ``dp_array``.
    """
    def count_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(count_with_dp_array(target - item, dp_array) for item in array)
        dp_array[target] = answer  # memoize before returning
        return answer

    dp_array = [-1] * (target + 1)
    return count_with_dp_array(target, dp_array)
def lowercase ( n : int , array : list[int] , target : int ):
    """Count ordered sequences of items from ``array`` (length ``n``) that sum
    to ``target``, bottom-up: ``dp[i]`` = number of sequences summing to i.

    BUG FIX: the original declared all parameters with one shared name
    (SyntaxError) and bound the base case ``dp[0] = 1`` to a throwaway local.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # the empty sequence sums to 0
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # BUG FIX: the three constants were all bound to the same name and the
    # call referenced the undefined `combination_sum_iv`; distinct names are
    # restored and the bottom-up implementation above (named `lowercase`
    # in this file) is invoked.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(lowercase(n, array, target))
| 656 | 0 |
def UpperCamelCase_ ( ) -> int:
    """Project Euler 40: product of the digits d1·d10·...·d1000000 of the
    Champernowne constant 0.123456789101112...

    IMPROVEMENT: the original appended one million *numbers* (≈6.9M digits,
    7x more than needed) because it compared the list length, not the digit
    count; we now stop as soon as one million digits are available. The
    result is unchanged.
    """
    digit_groups: list[str] = []
    total_digits = 0
    i = 1
    while total_digits < 1_000_000:
        s = str(i)
        digit_groups.append(s)
        total_digits += len(s)
        i += 1
    constant = "".join(digit_groups)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9_999])
        * int(constant[99_999])
        * int(constant[999_999])
    )
if __name__ == "__main__":
    # BUG FIX: the original called the undefined name `solution`; the solver
    # defined above is named `UpperCamelCase_` in this file.
    print(UpperCamelCase_())
| 37 | """simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase ( x ):
    """Exact GELU: x * Φ(x), with the Gaussian CDF computed via erf.

    BUG FIX: the original bound the converted tensor and the CDF to throwaway
    locals while the following lines read ``x``/``cdf``, which the parameter
    name did not provide.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def lowercase ( x ):
    """Tanh-approximated GELU (the "gelu_new" variant).

    BUG FIX: the original bound ``pi``, the 0.044715 coefficient and the CDF
    to throwaway locals while the expression read those names undefined.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.04_47_15, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def lowercase ( x ):
    """Mish activation: x * tanh(softplus(x)).

    BUG FIX: the converted tensor was bound to a throwaway local while the
    return expression read the undefined name ``x``.
    """
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def lowercase ( x ):
    """Fast GELU approximation with a precomputed sqrt(2/pi) coefficient.

    BUG FIX: the original bound both coefficients to throwaway locals and the
    return expression read the undefined names ``x``/``coeffa``; the two
    coefficients are now distinct (0.044715 and sqrt(2/pi) ≈ 0.7978845608).
    """
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.04_47_15, x.dtype)
    coeff2 = tf.cast(0.79_78_84_56_08, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def lowercase ( x ):
    """Quick GELU: x * sigmoid(1.702 * x).

    BUG FIX: the converted tensor and the coefficient were bound to throwaway
    locals while the return read the undefined names ``x``/``coeff``.
    """
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.7_02, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def lowercase ( UpperCamelCase : Tuple ):
    """GELU with its output clipped to the range [-10, 10].

    NOTE(review): `_gelu` is expected to be the erf-based GELU, but no
    function is bound under that name in this file — confirm the target.
    """
    activated = _gelu(UpperCamelCase)
    return tf.clip_by_value(activated, -10, 10)
def lowercase ( x , axis=-1 ):
    """Gated Linear Unit: split ``x`` in two halves along ``axis`` and gate
    the first half with the sigmoid of the second.

    BUG FIX: the original declared both parameters with the same name
    (a SyntaxError), discarded the split halves into one throwaway tuple
    target, and gated with the sigmoid of the *input* instead of the second
    half.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
# Pick the GELU implementation depending on the installed TF version:
# TF >= 2.4 ships a native keras gelu with an `approximate` switch.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def lowercase ( UpperCamelCase : int ):
        """Wrapper selecting the tanh-approximated Keras GELU."""
        # NOTE(review): `approximate=` is passed the *input tensor* here; the
        # conventional implementation passes `approximate=True` — confirm.
        return tf.keras.activations.gelu(UpperCamelCase , approximate=UpperCamelCase )

    # NOTE(review): both aliases bind to the same name `__A`, so only the last
    # assignment survives, and `approximate_gelu_wrap` is not defined under
    # that name in this file (the wrapper above is named `lowercase`).
    __A : Optional[Any] = tf.keras.activations.gelu
    __A : Optional[Any] = approximate_gelu_wrap
else:
    # NOTE(review): `_gelu` / `_gelu_new` are not bound under those names in
    # this file (the activation defs above are all named `lowercase`).
    __A : Any = _gelu
    __A : Union[str, Any] = _gelu_new

# String-name -> activation-callable lookup table.
# NOTE(review): every value below (gelu, gelu_aa, gelu_fast, ...) refers to a
# name that is never bound in this file — confirm against the original module.
__A : List[str] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase ( activation_string ):
    """Map an activation name (e.g. "gelu") to its TF callable.

    BUG FIX: the body read ``activation_string`` while the parameter had a
    different name; the parameter is restored to the name the body uses.
    NOTE(review): ``ACTaFN`` is referenced but the mapping above is bound to a
    different name in this file — confirm.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''')
| 656 | 0 |
'''Sorts the entries of `_import_structure` in diffusers `__init__.py` files.'''
import argparse
import os
import re

# NOTE(review): every constant below is bound to the same name `A_`, so only
# the final regex survives; the functions later in this file refer to the
# original names (_re_indent, _re_direct_key, ...), which are never defined.
A_ : Optional[int] = "src/diffusers"
# Pattern that looks at the indentation in a line.
A_ : Optional[int] = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
A_ : str = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A_ : Optional[int] = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
A_ : str = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A_ : List[Any] = re.compile(R"\[([^\]]+)\]")
def UpperCamelCase__ ( code : str ) -> str:
    """Return the leading-whitespace indent of the first line of ``code``
    (empty string when there is no non-blank content).

    BUG FIX: the regex match was bound to a throwaway local while the return
    expression read the undefined name ``search``; the return annotation also
    said ``List[str]`` for a function that returns a string.
    """
    # NOTE(review): `_re_indent` is never bound under that name in this file
    # (the constants above were all assigned to `A_`) — confirm.
    search = _re_indent.search(code)
    return "" if search is None else search.groups()[0]
def UpperCamelCase__ ( code , indent_level="" , start_prompt=None , end_prompt=None ):
    """Split ``code`` into blocks of lines sitting at ``indent_level``,
    optionally keeping everything before ``start_prompt`` / after
    ``end_prompt`` as single untouched blocks.

    BUG FIX: the original declared all four parameters with one shared name
    (a SyntaxError) and discarded ``index``/``lines``/``blocks``/
    ``current_block`` into throwaway locals while reading those names.
    """
    index = 0
    lines = code.split("""\n""")
    if start_prompt is not None:
        # Skip ahead to the start prompt; everything before it is one block.
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["""\n""".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line back at `indent_level` closes the current block — unless the
            # previous line was more deeply indented, in which case this line is
            # the block's closing delimiter and belongs to it.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + """ """):
                current_block.append(lines[index])
                blocks.append("""\n""".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("""\n""".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("""\n""".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("""\n""".join(lines[index:]))
    # NOTE(review): `get_indent` is never bound under that name in this file.
    return blocks
def UpperCamelCase__ ( key ):
    """Wrap a key function so the sort ignores case and underscores.

    BUG FIX: the original's inner function named its parameter differently
    from the ``__magic_name__``/``key`` names its body read, so both ``key``
    and the argument were undefined at call time.
    """
    def _inner(x):
        return key(x).lower().replace("""_""", """""")

    return _inner
def UpperCamelCase__ ( objects , key=None ):
    """Sort import names: UPPER_CASE constants first, then Classes, then
    lower_case functions, each group sorted case/underscore-insensitively.

    BUG FIX: ``noop`` returned an undefined name, the ``key`` fallback and all
    three groups were bound to throwaway locals, and the final ``sorted``
    calls read those undefined names.
    """
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    # NOTE(review): `ignore_underscore` is never bound under that name in this
    # file (the wrapper above is named `UpperCamelCase__`) — confirm.
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def UpperCamelCase__ ( import_statement : str ) -> str:
    """Return ``import_statement`` with the names inside its brackets sorted.

    Handles the three layouts: multi-line (one name per line), three-line
    (all names on the middle line), and single-line.

    BUG FIX: the original's ``_replace`` helper named its parameter
    differently from the ``match``/``imports``/``keys`` names it read, and the
    outer body discarded ``lines``/``idx``/``sorted_lines`` into throwaway
    locals while reading those names.
    """
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace("""\"""", """""") for part in imports.split(""",""")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("""\n""")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == """[""" else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("""\"""", """""") for part in lines[1].split(""",""")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + """, """.join([f"\"{k}\"" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        # NOTE(review): `sort_objects`, `get_indent`, `_re_strip_line` and
        # `_re_bracket_content` are never bound under those names in this file.
        return _re_bracket_content.sub(_replace, import_statement)
def UpperCamelCase__ ( file , check_only=True ):
    """Sort the ``_import_structure`` blocks of one ``__init__.py``.

    Returns ``True`` when ``check_only`` is set and the file would change;
    otherwise rewrites the file in place.

    BUG FIX: the original declared both parameters with one shared name
    (a SyntaxError) and discarded every intermediate (``main_blocks``,
    ``block_lines``, ``line_idx``, ``keys``, ``reordered_blocks``, ...) into
    throwaway locals while reading those names.
    """
    with open(file, """r""") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="""_import_structure = {""", end_prompt="""if TYPE_CHECKING:"""
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("""\n""")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = """\n""".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    # NOTE(review): the helper/constant names used above are never bound under
    # those names in this file — confirm against the original module.
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, """w""") as f:
                f.write("""\n""".join(main_blocks))
def UpperCamelCase__ ( check_only=True ):
    """Run the import sorter on every ``__init__.py`` under the package root,
    raising when ``check_only`` finds files that would be rewritten.

    BUG FIX: the original walked the *boolean parameter* instead of the
    package root, and discarded ``failures``/``result`` into throwaway locals
    while reading those names.
    """
    failures = []
    # NOTE(review): the package root constant was bound to `A_` above, not to
    # `PATH_TO_DIFFUSERS`, and `sort_imports` is not bound under that name in
    # this file — confirm against the original module.
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, """__init__.py"""), check_only=check_only)
            if result:
                failures = [os.path.join(root, """__init__.py""")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    # CLI entry point: `--check_only` reports instead of rewriting.
    # NOTE(review): the parser and parsed args are bound to `A_` while the
    # lines below read `parser`/`args`, and `sort_imports_in_all_inits` is
    # never bound under that name in this file — confirm.
    A_ : Tuple = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    A_ : Union[str, Any] = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 38 | """simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( _UpperCamelCase):
    """Config tester that additionally verifies SegFormer-specific attributes."""

    def _UpperCAmelCase ( self : Dict ):
        """Instantiate the config from the test inputs and check its fields."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class __lowerCAmelCase :
    """Builds tiny SegFormer configs and random inputs, and checks the shapes
    (and, for segmentation, losses) of the model outputs.

    NOTE(review): the ``__init__`` below repeats one parameter name for every
    argument (invalid Python — duplicate argument names) and binds each value
    to a throwaway local instead of ``self.<attr>``, while the other methods
    read ``self.batch_size`` etc.; the bodies clearly expect the conventional
    names (parent, batch_size, image_size, ...) — confirm against the original.
    """

    def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=[2, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[8, 4, 2, 1] , UpperCamelCase__ : Tuple=[16, 32, 64, 128] , UpperCamelCase__ : Optional[int]=[1, 4, 8, 16] , UpperCamelCase__ : Any=[1, 2, 4, 8] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=None , ):
        A__ : Tuple =parent
        A__ : List[Any] =batch_size
        A__ : List[Any] =image_size
        A__ : Union[str, Any] =num_channels
        A__ : Optional[int] =num_encoder_blocks
        A__ : Any =sr_ratios
        A__ : Any =depths
        A__ : List[Any] =hidden_sizes
        A__ : List[Any] =downsampling_rates
        A__ : List[str] =num_attention_heads
        A__ : int =is_training
        A__ : List[Any] =use_labels
        A__ : Any =hidden_act
        A__ : Dict =hidden_dropout_prob
        A__ : int =attention_probs_dropout_prob
        A__ : List[Any] =initializer_range
        A__ : Tuple =num_labels
        A__ : List[Any] =scope

    def _UpperCAmelCase ( self : Optional[int] ):
        """Build random pixel values (and labels when use_labels) plus a config."""
        A__ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ : Any =None
        if self.use_labels:
            A__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        A__ : List[Any] =self.get_config()
        return config, pixel_values, labels

    def _UpperCAmelCase ( self : Tuple ):
        """Return a SegformerConfig built from the tester's tiny dimensions."""
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
        """Check the base model's last hidden state shape (downsampled H/W)."""
        A__ : Any =SegformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : Dict =model(UpperCamelCase__ )
        # final feature map is image_size / (last downsampling rate * 2)
        A__ : Optional[int] =self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
        """Check semantic-segmentation logits shape and that the loss is positive."""
        A__ : str =self.num_labels
        A__ : Optional[Any] =SegformerForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : Optional[Any] =model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        A__ : List[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    def _UpperCAmelCase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
        """Binary-segmentation variant (single label): loss must be positive."""
        A__ : Tuple =1
        A__ : Tuple =SegformerForSemanticSegmentation(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        A__ : List[str] =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase__ )
        A__ : Dict =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertGreater(result.loss , 0.0 )

    def _UpperCAmelCase ( self : str ):
        """Pack (config, {"pixel_values": ...}) for the common test mixin."""
        A__ : Union[str, Any] =self.prepare_config_and_inputs()
        A__ , A__ , A__ : Tuple =config_and_inputs
        A__ : Tuple ={"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    """Common-suite tests for SegFormer models (shapes, attentions, hidden
    states, training backward pass, pretrained loading).

    NOTE(review): class attributes are all named ``__magic_name__`` (later
    assignments shadow earlier ones) and the setUp reads the undefined names
    ``SegformerModelTester``/``SegformerConfigTester`` — confirm against the
    original test module.
    """

    # All model heads under test (empty when torch is unavailable).
    __magic_name__ : Dict = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-name -> model-class mapping for the pipeline test mixin.
    __magic_name__ : Optional[int] = (
        {
            """feature-extraction""": SegformerModel,
            """image-classification""": SegformerForImageClassification,
            """image-segmentation""": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Common-test feature flags (e.g. no input embeddings / head masking).
    __magic_name__ : Dict = True
    __magic_name__ : List[str] = False
    __magic_name__ : Optional[Any] = False
    __magic_name__ : str = False

    def _UpperCAmelCase ( self : Union[str, Any] ):
        """Create the model tester and config tester used by all tests."""
        A__ : Union[str, Any] =SegformerModelTester(self )
        # NOTE(review): config_class is passed an undefined name here;
        # presumably SegformerConfig was intended — confirm.
        A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ )

    def _UpperCAmelCase ( self : str ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self : Dict ):
        """Base model: check last-hidden-state shape."""
        A__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _UpperCAmelCase ( self : Tuple ):
        """Binary (single-label) segmentation: loss must be positive."""
        A__ : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        """Multi-label segmentation: logits shape and positive loss."""
        A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ )

    @unittest.skip("SegFormer does not use inputs_embeds" )
    def _UpperCAmelCase ( self : Dict ):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def _UpperCAmelCase ( self : Tuple ):
        pass

    def _UpperCAmelCase ( self : List[str] ):
        """forward() signature must start with `pixel_values`."""
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : int =model_class(UpperCamelCase__ )
            A__ : Optional[int] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : Optional[int] =[*signature.parameters.keys()]
            A__ : List[str] =["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def _UpperCAmelCase ( self : str ):
        """Attention outputs: count and (reduced) sequence-length shapes."""
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Union[str, Any] =True
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            A__ : Union[str, Any] =False
            A__ : str =True
            A__ : Optional[int] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Any =outputs.attentions
            # one attention map per layer across all encoder blocks
            A__ : List[str] =sum(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A__ : Dict =True
            A__ : str =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Union[str, Any] =outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A__ : List[Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            A__ : Tuple =(self.model_tester.image_size // 32) ** 2
            A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            A__ : int =len(UpperCamelCase__ )
            # Check attention is always last and order is fine
            A__ : Optional[Any] =True
            A__ : Any =True
            A__ : Union[str, Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
            A__ : Optional[Any] =outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )

    def _UpperCAmelCase ( self : List[Any] ):
        """Hidden states: one per encoder block, first has hidden_sizes[0] channels."""
        def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
            A__ : Optional[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Optional[Any] =outputs.hidden_states
            A__ : int =self.model_tester.num_encoder_blocks
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ : str =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[int] ):
        """Training: loss.backward() must run for trainable head classes."""
        if not self.model_tester.is_training:
            return
        A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : List[Any] =True
        for model_class in self.all_model_classes:
            if model_class in get_values(UpperCamelCase__ ):
                continue
            A__ : List[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.train()
            A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _UpperCAmelCase ( self : Tuple ):
        pass

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        """Smoke test: the first pretrained checkpoint loads."""
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def lowercase ( ):
    """Load the standard COCO cats fixture image used by the integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Slow integration tests for SegFormer semantic-segmentation checkpoints.

    Each test loads a published checkpoint, runs a forward pass on the COCO
    fixture image, and compares logits slices / post-processed masks against
    golden values.

    NOTE(review): identifiers look machine-mangled — ``UpperCamelCase__`` is
    passed where distinct values (booleans, the torch device, tensors) were
    clearly intended, and every intermediate result is bound to ``A__`` while
    later lines read the original names (``image_processor``, ``model``,
    ``outputs``, ``segmentation``). Verify against the upstream transformers
    test file before relying on this class.
    '''
    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # ADE20k b0 checkpoint: forward pass, then compare a 3x3x3 logits
        # slice against golden values (tight tolerance, atol=1e-4).
        # only resize + normalize
        A__ : List[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : Union[str, Any] =prepare_img()
        A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        # SegFormer logits come out at 1/4 of the 512x512 input resolution.
        A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : Optional[int] =torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
    @slow
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Cityscapes b1 checkpoint: same forward-pass check with a looser
        # tolerance (atol=1e-1) against its golden logits slice.
        # only resize + normalize
        A__ : Dict =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : int =SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(UpperCamelCase__ )
        A__ : Tuple =prepare_img()
        A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : List[Any] =torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) )
    @slow
    def _UpperCAmelCase ( self : int ):
        # Post-processing check: segmentation maps are upsampled to the
        # requested target size, or kept at logits resolution otherwise.
        # only resize + normalize
        A__ : Optional[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : str =prepare_img()
        A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : Dict =model(UpperCamelCase__ )
        A__ : Any =outputs.logits.detach().cpu()
        # With target_sizes the predicted mask is resized to (500, 300)...
        A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
        A__ : List[str] =torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        # ...without it the mask keeps the raw logits resolution (128, 128).
        A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        A__ : Tuple =torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 656 | 0 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
while a != 0:
snake_case_, snake_case_ = b % a, a
return b
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if gcd(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) != 1:
snake_case_ = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
snake_case_, snake_case_, snake_case_ = 1, 0, a
snake_case_, snake_case_, snake_case_ = 0, 1, m
while va != 0:
snake_case_ = ua // va
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 39 | """simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase):
    '''Config/input builder for the Flax RoBERTa-PreLayerNorm model tests.

    Produces ``RobertaPreLayerNormConfig`` instances plus matching dummy
    input tensors for the test classes below.

    NOTE(review): identifiers look machine-mangled — every ``__init__``
    parameter is named ``UpperCamelCase__``, the attribute assignments read
    the original names (``parent``, ``batch_size``, ...) that the signature
    no longer defines, and multi-target unpacks collapse four values into
    one name (``A__ , A__ , A__ , A__ = ...``). Verify against the upstream
    transformers tester before relying on this class.
    '''
    def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ):
        # Store the hyper-parameters used to build configs and inputs below.
        A__ : str =parent
        A__ : List[str] =batch_size
        A__ : Any =seq_length
        A__ : List[str] =is_training
        A__ : List[Any] =use_attention_mask
        A__ : List[Any] =use_token_type_ids
        A__ : Dict =use_labels
        A__ : List[Any] =vocab_size
        A__ : Optional[int] =hidden_size
        A__ : Optional[Any] =num_hidden_layers
        A__ : str =num_attention_heads
        A__ : int =intermediate_size
        A__ : Tuple =hidden_act
        A__ : Tuple =hidden_dropout_prob
        A__ : Dict =attention_probs_dropout_prob
        A__ : Any =max_position_embeddings
        A__ : Any =type_vocab_size
        A__ : Union[str, Any] =type_sequence_label_size
        A__ : Optional[Any] =initializer_range
        A__ : int =num_choices
    def _UpperCAmelCase ( self : Tuple ):
        # Build (config, input_ids, token_type_ids, attention_mask) with the
        # stored hyper-parameters; optional tensors default to None.
        A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : List[str] =None
        if self.use_attention_mask:
            A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
        A__ : str =None
        if self.use_token_type_ids:
            A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        A__ : Any =RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def _UpperCAmelCase ( self : Tuple ):
        # Repackage prepare_config_and_inputs() as (config, inputs_dict) for
        # the common-test mixin.
        A__ : Dict =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : str =config_and_inputs
        A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def _UpperCAmelCase ( self : int ):
        # Decoder variant: additionally builds encoder hidden states and an
        # encoder attention mask for cross-attention tests.
        A__ : str =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs
        A__ : Union[str, Any] =True
        A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    '''Common model tests for the Flax RoBERTa-PreLayerNorm family.

    NOTE(review): names look machine-mangled — both class attributes are
    called ``__magic_name__`` (the second shadows the first; upstream these
    are distinct attributes such as ``test_head_masking`` and
    ``all_model_classes``), and ``FlaxRobertaPreLayerNormModelTester`` is
    not defined in this file (the tester class above was renamed). Verify
    against the upstream transformers test file.
    '''
    # Boolean test flag (original attribute name lost to mangling).
    __magic_name__ : Union[str, Any] = True
    # Tuple of every model class exercised by the common tests; empty when
    # Flax is unavailable so collection does not fail.
    __magic_name__ : Dict = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def _UpperCAmelCase ( self : Optional[int] ):
        # setUp equivalent: build the shared config/input tester.
        A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        # Hub smoke test: load each class from the converted PyTorch
        # checkpoint and run a minimal forward pass.
        for model_class_name in self.all_model_classes:
            A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
            A__ : Union[str, Any] =model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase):
    '''Slow integration tests for Flax RoBERTa-PreLayerNorm checkpoints.

    Each test loads the ``andreasmadsen/efficient_mlm_m0.40`` checkpoint,
    runs a fixed token sequence through it, and compares an output slice
    against golden values.

    NOTE(review): identifiers look machine-mangled — ``UpperCamelCase__`` is
    passed where distinct values were intended, results bind to ``A__``
    while later lines read ``model``/``output``, and the dtype names
    ``jnp.intaa``/``np.floataa`` appear to be mangled forms of integer/float
    dtypes (likely ``int64``/``float32``). Verify against the upstream
    transformers test file.
    '''
    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # Masked-LM head: check output shape (batch=1, seq=11, vocab=50265)
        # and a 3x3 logits slice against golden values.
        A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : str =model(UpperCamelCase__ )[0]
        A__ : List[Any] =[1, 11, 50265]
        self.assertEqual(list(output.shape ) , UpperCamelCase__ )
        # compare the actual values for a slice.
        A__ : Any =np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        # Base model: same input, compare a slice of the last hidden states.
        A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : Dict =model(UpperCamelCase__ )[0]
        # compare the actual values for a slice.
        A__ : Optional[Any] =np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 656 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.