| code (string) | code_codestyle (int) | style_context (string) | style_context_codestyle (int) | label (int) |
|---|---|---|---|---|
"""ViViT model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/vivit-b-16x2-kinetics400': (
        'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViViT model."""

    model_type = "vivit"

    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
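# Usage sketch (assumption: the public `transformers` package exposes this class
# as VivitConfig; the overridden values below are illustrative only):
#
#     from transformers import VivitConfig
#
#     config = VivitConfig(num_frames=16, hidden_size=384)
#     assert config.num_frames == 16 and config.hidden_size == 384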
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
        'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'YolosForObjectDetection',
        'YolosModel',
        'YolosPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
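# The block above registers submodule contents in `_import_structure` and defers
# the real imports until first attribute access. A stripped-down sketch of the
# idea (an assumption for illustration, not the actual transformers._LazyModule):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     mod = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(mod, attr)
#             raise AttributeError(attr)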
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the coefficient term of the formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
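# For reference, the formula implemented above is Newton's forward-difference
# interpolation: with u = (x - x0) / h,
#     f(x) ~ y0 + u*dy0 + u(u-1)/2! * d^2 y0 + u(u-1)(u-2)/3! * d^3 y0 + ...
# where the d^k y0 terms are read from the first row of the difference table.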
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_dir = str(tmp_path)
    dataset_info.write_to_directory(tmp_dir)
    reloaded = DatasetInfo.from_directory(tmp_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_dir, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234)
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42)
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_dir)
    reloaded = DatasetInfosDict.from_directory(tmp_dir)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_dir, "README.md"))
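# Quick sketch of the round-trip behaviour under test (path is illustrative):
#
#     info = DatasetInfo(description="demo", dataset_size=42)
#     info.write_to_directory("/tmp/demo")                   # writes dataset_info.json
#     assert DatasetInfo.from_directory("/tmp/demo") == info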
| 665 |
"""Kahn's algorithm: topological sort of a directed acyclic graph."""


def topological_sort(graph: dict[int, list[int]]) -> None:
    """Print a topological ordering of `graph`, or report that a cycle exists."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
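# Design note: queue.pop(0) is O(n) on a Python list; swapping in
# collections.deque and popleft() keeps the whole pass at O(V + E).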
| 665 | 1 |
"""Check whether two strings are anagrams of each other."""
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if `first_str` and `second_str` are anagrams."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
    print(f"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
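# Examples (illustrative):
#     check_anagrams("Silent", "Listen")   -> True
#     check_anagrams("apple", "papel")     -> True
#     check_anagrams("hello", "world")     -> False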
| 665 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
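# Sketch of the RoPE-scaling knob exercised in test_model_rope_scaling above
# (assumes the transformers LlamaConfig API; the factor value is illustrative):
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})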
| 665 | 1 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a: the root of f is the square root of a."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00_0000_0000_0001) -> float:
    """Approximate the square root of `a` with Newton's method."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
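# Example: square_root_iterative(2.0) converges to ~1.41421356, and
# square_root_iterative(-1.0) raises ValueError("math domain error").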
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 665 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True)
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 665 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
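# Usage sketch (assumes the released checkpoint name is reachable on the Hub):
#
#     tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tok("Hello world").input_ids   # [CLS] ... [SEP]-wrapped ids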
| 665 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
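# Usage sketch (assumes the Hub checkpoint referenced above is reachable):
#
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tok("convolution meets attention", return_token_type_ids=True)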
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator("Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences)
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 665 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # 5 images x 3 candidate labels; every score is ~1/3 for the tiny random model
            [[{"score": 0.333, "label": ANY(str)}] * 3] * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # 5 images x 3 candidate labels; every score is ~1/3 for the tiny random model
            [[{"score": 0.333, "label": ANY(str)}] * 3] * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
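# Illustrative sketch (hypothetical class, not the config above) of the
# attribute_map pattern this config declares: reads of canonical names such as
# hidden_size are redirected to the GPT-2-style fields like n_embd.
class _AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails, so aliases resolve here.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert _AliasedConfig(n_embd=256).hidden_size == 256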
| 665 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """biogpt"""
def __init__( self : str ,_a : Optional[int]=42384 ,_a : Union[str, Any]=1024 ,_a : List[str]=24 ,_a : Union[str, Any]=16 ,_a : Dict=4096 ,_a : Union[str, Any]="gelu" ,_a : Any=0.1 ,_a : Union[str, Any]=0.1 ,_a : Tuple=1024 ,_a : Union[str, Any]=0.02 ,_a : Union[str, Any]=1e-12 ,_a : Dict=True ,_a : Dict=True ,_a : Union[str, Any]=0.0 ,_a : int=0.0 ,_a : Union[str, Any]=1 ,_a : Tuple=0 ,_a : List[Any]=2 ,**_a : Dict ,):
'''simple docstring'''
A_ : Dict = vocab_size
A_ : Tuple = max_position_embeddings
A_ : Optional[int] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : str = num_attention_heads
A_ : Dict = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : List[str] = layer_norm_eps
A_ : Dict = scale_embedding
A_ : Optional[Any] = use_cache
A_ : Union[str, Any] = layerdrop
A_ : Optional[Any] = activation_dropout
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
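# Small numeric aside, assuming the usual fairseq-style convention behind the
# scale_embedding flag above: when enabled, token embeddings are multiplied by
# sqrt(hidden_size) before entering the decoder, i.e. 32.0 for the 1024-dim default.
import math

scale_embedding, hidden_size = True, 1024
embed_scale = math.sqrt(hidden_size) if scale_embedding else 1.0
assert embed_scale == 32.0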
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase ( ):
A_ : Union[str, Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
A_ : Optional[Any] = bs[:]
A_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
A_ : List[Any] = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int):
A_ : int = set()
A_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : List[str] = char
return pairs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : _a[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
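# Readable re-implementation (for illustration only) of the byte-to-unicode
# table built by the module-level helper above: every one of the 256 byte
# values maps to a distinct printable character, so the mapping inverts losslessly.
def _bytes_to_unicode_demo():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

_table = _bytes_to_unicode_demo()
assert len(_table) == 256 and len(set(_table.values())) == 256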
| 665 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = old_name
if "patch_embed" in old_name:
A_ , A_ , A_ : Tuple = old_name.split(""".""")
if layer == "0":
A_ : List[Any] = old_name.replace("""0""" , """convolution1""")
elif layer == "1":
A_ : Optional[int] = old_name.replace("""1""" , """batchnorm_before""")
elif layer == "3":
A_ : int = old_name.replace("""3""" , """convolution2""")
else:
A_ : List[Any] = old_name.replace("""4""" , """batchnorm_after""")
if "network" in old_name and re.search(r"""\d\.\d""" , lowerCamelCase):
A_ : Tuple = r"""\b\d{2}\b"""
if bool(re.search(lowerCamelCase , lowerCamelCase)):
A_ : Optional[Any] = re.search(r"""\d\.\d\d.""" , lowerCamelCase).group()
else:
A_ : List[Any] = re.search(r"""\d\.\d.""" , lowerCamelCase).group()
if int(match[0]) < 6:
A_ : List[Any] = old_name.replace(lowerCamelCase , """""")
A_ : Tuple = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1])
A_ : str = """intermediate_stages.""" + trimmed_name
else:
A_ : Dict = old_name.replace(lowerCamelCase , """""")
if int(match[2]) < num_meta4D_last_stage:
A_ : Optional[int] = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2])
else:
A_ : Optional[Any] = str(int(match[2]) - num_meta4D_last_stage)
A_ : Optional[Any] = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index)
if "norm1" in old_name:
A_ : int = trimmed_name.replace("""norm1""" , """layernorm1""")
elif "norm2" in old_name:
A_ : List[Any] = trimmed_name.replace("""norm2""" , """layernorm2""")
elif "fc1" in old_name:
A_ : Tuple = trimmed_name.replace("""fc1""" , """linear_in""")
elif "fc2" in old_name:
A_ : Any = trimmed_name.replace("""fc2""" , """linear_out""")
A_ : List[Any] = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(r""".\d.""" , lowerCamelCase):
A_ : List[Any] = old_name.replace("""network""" , """intermediate_stages""")
if "fc" in new_name:
A_ : Union[str, Any] = new_name.replace("""fc""" , """convolution""")
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A_ : Tuple = new_name.replace("""norm1""" , """batchnorm_before""")
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A_ : Optional[int] = new_name.replace("""norm2""" , """batchnorm_after""")
if "proj" in new_name:
A_ : Optional[Any] = new_name.replace("""proj""" , """projection""")
if "dist_head" in new_name:
A_ : Union[str, Any] = new_name.replace("""dist_head""" , """distillation_classifier""")
elif "head" in new_name:
A_ : Optional[Any] = new_name.replace("""head""" , """classifier""")
elif "patch_embed" in new_name:
A_ : List[str] = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A_ : Optional[int] = new_name.replace("""norm""" , """layernorm""")
A_ : str = """efficientformer.""" + new_name
else:
A_ : Dict = """efficientformer.encoder.""" + new_name
return new_name
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
for key in checkpoint.copy().keys():
A_ : List[Any] = checkpoint.pop(lowerCamelCase)
A_ : Union[str, Any] = val
return checkpoint
def lowerCamelCase ( ):
A_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Union[str, Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return image
def lowerCamelCase ( lowerCamelCase : Path , lowerCamelCase : Path , lowerCamelCase : Path , lowerCamelCase : bool):
A_ : Optional[Any] = torch.load(lowerCamelCase , map_location="""cpu""")["""model"""]
A_ : str = EfficientFormerConfig.from_json_file(lowerCamelCase)
A_ : Optional[int] = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase)
A_ : Optional[Any] = """_""".join(checkpoint_path.split("""/""")[-1].split(""".""")[0].split("""_""")[:-1])
A_ : Any = config.depths[-1] - config.num_metaad_blocks + 1
A_ : str = convert_torch_checkpoint(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
model.eval()
A_ : Dict = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
A_ : Tuple = prepare_img()
A_ : Optional[int] = 256
A_ : List[str] = 224
A_ : Union[str, Any] = EfficientFormerImageProcessor(
size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , )
A_ : List[str] = processor(images=lowerCamelCase , return_tensors="""pt""").pixel_values
# original processing pipeline
A_ : Union[str, Any] = Compose(
[
Resize(lowerCamelCase , interpolation=pillow_resamplings["""bicubic"""]),
CenterCrop(lowerCamelCase),
ToTensor(),
Normalize(lowerCamelCase , lowerCamelCase),
])
A_ : Dict = image_transforms(lowerCamelCase).unsqueeze(0)
assert torch.allclose(lowerCamelCase , lowerCamelCase)
A_ : int = model(lowerCamelCase)
A_ : Optional[int] = outputs.logits
A_ : Dict = (1, 1000)
if "l1" in model_name:
A_ : str = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3)
assert logits.shape == expected_shape
elif "l3" in model_name:
A_ : int = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3)
assert logits.shape == expected_shape
elif "l7" in model_name:
A_ : Union[str, Any] = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
assert logits.shape == expected_shape
else:
raise ValueError(
F'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7')
# Save Checkpoints
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')
processor.save_pretrained(lowerCamelCase)
print(F'Processor successfully saved at {pytorch_dump_path}')
if push_to_hub:
print("""Pushing model to the hub...""")
model.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message="""Add model""" , use_temp_dir=lowerCamelCase , )
processor.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message="""Add image processor""" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
__magic_name__ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
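# Toy illustration (made-up keys) of the conversion pattern above: pop every
# old key from the checkpoint, rewrite the name, and reinsert the tensor under
# the new name before calling load_state_dict.
def _rename_keys_demo(state_dict, rename):
    for key in list(state_dict):
        state_dict[rename(key)] = state_dict.pop(key)
    return state_dict

_toy = {"patch_embed.0.weight": 1, "head.bias": 2}
_renamed = _rename_keys_demo(_toy, lambda k: k.replace("head", "classifier"))
assert "classifier.bias" in _renamed and "patch_embed.0.weight" in _renamed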
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
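# Minimal plain-Python sketch of the two helpers above: BERT-style inputs are
# [CLS] A [SEP] for one sequence and [CLS] A [SEP] B [SEP] for a pair, with
# token type ids 0 for the first segment and 1 for the second. The ids below
# are illustrative, not the real vocabulary.
CLS, SEP = 101, 102

def _build_inputs(a, b=None):
    ids = [CLS] + a + [SEP]
    type_ids = [0] * len(ids)
    if b is not None:
        ids += b + [SEP]
        type_ids += [1] * (len(b) + 1)
    return ids, type_ids

_ids, _types = _build_inputs([7, 8], [9])
assert _ids == [CLS, 7, 8, SEP, 9, SEP] and _types == [0, 0, 0, 0, 1, 1]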
| 665 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
__magic_name__ = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
__magic_name__ = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
__magic_name__ = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : bool , lowerCamelCase : Optional[Dict[int, int]] = None , lowerCamelCase : bool = False , ):
if label_map is not None:
for old_id, new_id in label_map.items():
A_ : List[Any] = new_id
# turn into NumPy arrays
A_ : int = np.array(lowerCamelCase)
A_ : int = np.array(lowerCamelCase)
if reduce_labels:
A_ : str = 255
A_ : List[str] = label - 1
A_ : str = 255
A_ : Optional[int] = label != ignore_index
A_ : Tuple = np.not_equal(lowerCamelCase , lowerCamelCase)
A_ : List[str] = pred_label[mask]
A_ : str = np.array(lowerCamelCase)[mask]
A_ : Union[str, Any] = pred_label[pred_label == label]
A_ : List[str] = np.histogram(lowerCamelCase , bins=lowerCamelCase , range=(0, num_labels - 1))[0]
A_ : List[Any] = np.histogram(lowerCamelCase , bins=lowerCamelCase , range=(0, num_labels - 1))[0]
A_ : int = np.histogram(lowerCamelCase , bins=lowerCamelCase , range=(0, num_labels - 1))[0]
A_ : List[str] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : bool , lowerCamelCase : Optional[Dict[int, int]] = None , lowerCamelCase : bool = False , ):
A_ : Union[str, Any] = np.zeros((num_labels,) , dtype=np.floataa)
A_ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa)
A_ : int = np.zeros((num_labels,) , dtype=np.floataa)
A_ : Any = np.zeros((num_labels,) , dtype=np.floataa)
for result, gt_seg_map in zip(lowerCamelCase , lowerCamelCase):
A_ , A_ , A_ , A_ : List[Any] = intersect_and_union(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : bool , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Dict[int, int]] = None , lowerCamelCase : bool = False , ):
A_ , A_ , A_ , A_ : Dict = total_intersect_and_union(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# compute metrics
A_ : Union[str, Any] = {}
A_ : Any = total_area_intersect.sum() / total_area_label.sum()
A_ : int = total_area_intersect / total_area_union
A_ : Optional[Any] = total_area_intersect / total_area_label
A_ : Optional[Any] = np.nanmean(lowerCamelCase)
A_ : Union[str, Any] = np.nanmean(lowerCamelCase)
A_ : Optional[int] = all_acc
A_ : Tuple = iou
A_ : str = acc
if nan_to_num is not None:
A_ : int = {metric: np.nan_to_num(lowerCamelCase , nan=lowerCamelCase) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) ,reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] ,)
def _a ( self : str ,_a : int ,_a : str ,_a : int ,_a : bool ,_a : Optional[int] = None ,_a : Optional[Dict[int, int]] = None ,_a : bool = False ,):
'''simple docstring'''
A_ : Optional[Any] = mean_iou(
results=_a ,gt_seg_maps=_a ,num_labels=_a ,ignore_index=_a ,nan_to_num=_a ,label_map=_a ,reduce_labels=_a ,)
return iou_result
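# Worked numeric example (NumPy only) of the per-class IoU computed above:
# intersection over union of predicted and ground-truth pixels, then averaged.
import numpy as np

_pred = np.array([[0, 0], [1, 1]])
_gt = np.array([[0, 1], [1, 1]])
_ious = []
for _c in (0, 1):
    _inter = np.logical_and(_pred == _c, _gt == _c).sum()
    _union = np.logical_or(_pred == _c, _gt == _c).sum()
    _ious.append(_inter / _union)
# class 0: 1/2, class 1: 2/3, so mean IoU = 7/12
assert abs(np.mean(_ious) - 7 / 12) < 1e-9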
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
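# Sketch of build_inputs_with_special_tokens above in the BART/RoBERTa
# convention: <s> A </s> for one sequence and <s> A </s></s> B </s> for a
# pair; note the doubled separator, unlike BERT. Ids 0 and 2 are the usual
# <s>/</s> ids in these vocabularies.
BOS, EOS = 0, 2

def _bart_inputs(a, b=None):
    out = [BOS] + a + [EOS]
    if b is not None:
        out += [EOS] + b + [EOS]
    return out

assert _bart_inputs([5], [6]) == [BOS, 5, EOS, EOS, 6, EOS]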
| 665 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyInpaintPipeline
a_ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a_ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : int ):
'''simple docstring'''
return 32
@property
def _a ( self : Dict ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : int ):
'''simple docstring'''
A_ : List[str] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1005 ,)
A_ : List[Any] = MultilingualCLIP(_a )
A_ : Any = text_encoder.eval()
return text_encoder
@property
def _a ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[int] = {
"""in_channels""": 9,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : Any ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Any = self.dummy_text_encoder
A_ : Tuple = self.dummy_tokenizer
A_ : List[Any] = self.dummy_unet
A_ : Any = self.dummy_movq
A_ : Any = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : List[Any] ,_a : List[str] ,_a : str=0 ):
'''simple docstring'''
A_ : Any = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(_a ) ).to(_a )
A_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
A_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
A_ : str = image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : str = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
A_ : Any = np.ones((64, 64) ,dtype=np.floataa )
A_ : List[str] = 0
if str(_a ).startswith("""mps""" ):
A_ : Union[str, Any] = torch.manual_seed(_a )
else:
A_ : int = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[str] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Any = """cpu"""
A_ : Optional[int] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = pipe(**self.get_dummy_inputs(_a ) )
A_ : int = output.images
A_ : Optional[int] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Optional[Any] = image[0, -3:, -3:, -1]
A_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
A_ : Tuple = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def _a ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
A_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A_ : Optional[Any] = np.ones((768, 768) ,dtype=np.floataa )
A_ : Any = 0
A_ : Tuple = """a hat"""
A_ : str = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Dict = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" ,torch_dtype=torch.floataa )
A_ : Optional[Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ , A_ : str = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : str = pipeline(
_a ,image=_a ,mask_image=_a ,image_embeds=_a ,negative_image_embeds=_a ,generator=_a ,num_inference_steps=100 ,height=768 ,width=768 ,output_type="""np""" ,)
A_ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a ,_a )
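# One-line numeric sketch of classifier-free guidance, the role played by
# guidance_scale in the inputs above: the final noise estimate extrapolates
# from the unconditional prediction toward the text-conditioned one.
def _cfg(uncond, cond, scale):
    return uncond + scale * (cond - uncond)

assert _cfg(0.0, 1.0, 4.0) == 4.0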
| 665 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
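# The task dispatch in the script above, restated as a plain mapping
# (illustrative only; the branches also set task-specific hyperparameters):
_TASK_TO_HEAD = {
    "SQA": "TapasForQuestionAnswering",
    "WTQ": "TapasForQuestionAnswering",
    "WIKISQL_SUPERVISED": "TapasForQuestionAnswering",
    "TABFACT": "TapasForSequenceClassification",
    "MLM": "TapasForMaskedLM",
    "INTERMEDIATE_PRETRAINING": "TapasModel",
}
assert _TASK_TO_HEAD["TABFACT"].endswith("SequenceClassification")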
| 665 | 1 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__magic_name__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : str):
return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : int = _TestCommandArgs(dataset=lowerCamelCase , all_configs=lowerCamelCase , save_infos=lowerCamelCase)
A_ : Optional[int] = TestCommand(*lowerCamelCase)
test_command.run()
A_ : List[str] = os.path.join(lowerCamelCase , """README.md""")
assert os.path.exists(lowerCamelCase)
A_ : Any = DatasetInfosDict.from_directory(lowerCamelCase)
A_ : int = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""")),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""])),
"""langs""": Sequence(Value("""string""")),
"""spans""": Sequence(Value("""string""")),
}) , splits=[
{
"""name""": """train""",
"""num_bytes""": 235_1563,
"""num_examples""": 1_0000,
},
{
"""name""": """validation""",
"""num_bytes""": 23_8418,
"""num_examples""": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
})
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
A_ , A_ : List[str] = getattr(dataset_infos["""default"""] , lowerCamelCase), getattr(expected_dataset_infos["""default"""] , lowerCamelCase)
if key == "num_bytes":
assert is_apercent_close(lowerCamelCase , lowerCamelCase)
elif key == "splits":
assert list(lowerCamelCase) == list(lowerCamelCase)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes)
else:
assert result == expected
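# The 1% tolerance helper above, exercised on a pair of values
# (standalone restatement for illustration):
def _within_one_percent(source, target):
    return abs(source - target) / target < 0.01

assert _within_one_percent(1005, 1000) and not _within_one_percent(1020, 1000)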
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ):
'''simple docstring'''
A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
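# Standalone spherical linear interpolation matching the staticmethod above,
# run on plain 2-D vectors to show that alpha = 0 recovers the first endpoint.
import math

def _slerp(x0, x1, alpha):
    dot = sum(a * b for a, b in zip(x0, x1))
    n0 = math.sqrt(sum(a * a for a in x0))
    n1 = math.sqrt(sum(b * b for b in x1))
    theta = math.acos(dot / (n0 * n1))  # assumes non-parallel inputs, so sin(theta) != 0
    s = math.sin(theta)
    return [
        (math.sin((1 - alpha) * theta) * a + math.sin(alpha * theta) * b) / s
        for a, b in zip(x0, x1)
    ]

_p = _slerp([1.0, 0.0], [0.0, 1.0], 0.0)
assert abs(_p[0] - 1.0) < 1e-9 and abs(_p[1]) < 1e-9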
| 665 | 1 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch
def _a ( self : int ):
'''simple docstring'''
A_ : Any = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
A_ : Union[str, Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
A_ : Tuple = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
A_ : Any = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(_a )
BertModel.from_pretrained(_a )
BertTokenizer.from_pretrained(_a )
pipeline(task="""fill-mask""" ,model=_a )
# baseline - just load from_pretrained with normal network
A_ : List[str] = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
A_ : Union[str, Any] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ : Union[str, Any] = """1"""
A_ : str = subprocess.run(_a ,env=_a ,check=_a ,capture_output=_a )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("""success""" ,result.stdout.decode() )
@require_torch
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
A_ : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
A_ : List[Any] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
A_ : str = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(_a )
BertModel.from_pretrained(_a )
BertTokenizer.from_pretrained(_a )
pipeline(task="""fill-mask""" ,model=_a )
# baseline - just load from_pretrained with normal network
A_ : List[Any] = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
A_ : List[str] = self.get_env()
A_ : List[Any] = subprocess.run(_a ,env=_a ,check=_a ,capture_output=_a )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("""success""" ,result.stdout.decode() )
@require_torch
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
A_ : Any = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
A_ : Optional[Any] = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
A_ : Union[str, Any] = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
A_ : Optional[Any] = self.get_env()
A_ : int = subprocess.run(_a ,env=_a ,check=_a ,capture_output=_a )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("""success""" ,result.stdout.decode() )
# next emulate no network
A_ : Tuple = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ : Optional[int] = """1"""
A_ : Optional[Any] = subprocess.run(_a ,env=_a ,check=_a ,capture_output=_a )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("""success""" ,result.stdout.decode() )
@require_torch
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = """
from transformers import pipeline
"""
A_ : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
A_ : Optional[Any] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
A_ : Any = self.get_env()
A_ : Optional[Any] = """1"""
A_ : Tuple = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
A_ : Tuple = subprocess.run(_a ,env=_a ,check=_a ,capture_output=_a )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" ,result.stderr.decode().replace("""\n""" ,"""""" ) ,)
@require_torch
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[int] = """
from transformers import AutoModel
"""
A_ : Optional[int] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
A_ : Optional[Any] = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
A_ : Optional[int] = self.get_env()
A_ : Optional[Any] = subprocess.run(_a ,env=_a ,check=_a ,capture_output=_a )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("""success""" ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ : Optional[int] = """1"""
A_ : int = subprocess.run(_a ,env=_a ,check=_a ,capture_output=_a )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("""success""" ,result.stdout.decode() )
| 665 |
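Every test in the row above leans on the same trick: a small prelude that replaces socket.socket before anything touches the network, so a subprocess that tries to download fails loudly. A self-contained sketch of that pattern outside the test harness (the URL is only an example target):

import socket
import urllib.request

def offline_socket(*args, **kwargs):
    # Creating any socket now raises, which surfaces hidden network access.
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

socket.socket = offline_socket

try:
    urllib.request.urlopen("https://huggingface.co")
except Exception as err:
    print(f"blocked as expected: {err}")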
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels', which is the name the models of the
# transformers library expect for labels
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want sequence lengths that are round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 1 |
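The training loop in the row above folds gradient accumulation in by hand: divide each micro-batch loss by the accumulation factor and step the optimizer only every N batches. A stripped-down sketch of just that mechanic with a placeholder linear model and random data (everything here is illustrative):

import torch
from torch import nn

model = nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
gradient_accumulation_steps = 4

for step in range(16):
    batch, labels = torch.randn(8, 10), torch.randint(0, 2, (8,))
    loss = nn.functional.cross_entropy(model(batch), labels)
    # Scale so the summed gradient matches one big-batch step.
    (loss / gradient_accumulation_steps).backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()

One detail worth noting: the guard here uses (step + 1), whereas the loop above steps when step % gradient_accumulation_steps == 0, which also fires on the very first micro-batch.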
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
A_ : str = 0
A_ : Optional[int] = len(lowerCamelCase) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A_ : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCamelCase):
return None
A_ : List[str] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
A_ : str = left
A_ : str = point
elif point > right:
A_ : List[Any] = right
A_ : List[str] = point
else:
if item < current_item:
A_ : Union[str, Any] = point - 1
else:
A_ : Dict = point + 1
return None
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str]):
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A_ : Dict = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCamelCase):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
elif point > right:
return interpolation_search_by_recursion(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowerCamelCase , lowerCamelCase , lowerCamelCase , point - 1)
else:
return interpolation_search_by_recursion(
lowerCamelCase , lowerCamelCase , point + 1 , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Dict):
if collection != sorted(lowerCamelCase):
raise ValueError("""Collection must be ascending sorted""")
return True
if __name__ == "__main__":
import sys
__magic_name__ = 0
if debug == 1:
__magic_name__ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
__magic_name__ = 67
__magic_name__ = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print('Not found')
| 665 |
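Both variants in the row above probe at a position interpolated from the boundary values rather than the midpoint, which gives an expected O(log log n) probes on uniformly distributed keys. A compact iterative restatement with readable names of my own choosing, to make the probe formula explicit:

from __future__ import annotations

def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    """Index of item in an ascending-sorted list, or None if absent."""
    left, right = 0, len(sorted_collection) - 1
    while left <= right and sorted_collection[left] <= item <= sorted_collection[right]:
        if sorted_collection[left] == sorted_collection[right]:
            return left if sorted_collection[left] == item else None
        # Linear estimate of where item should sit between the boundaries.
        point = left + (item - sorted_collection[left]) * (right - left) // (
            sorted_collection[right] - sorted_collection[left]
        )
        if sorted_collection[point] == item:
            return point
        if sorted_collection[point] < item:
            left = point + 1
        else:
            right = point - 1
    return None

print(interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 66))  # 5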
'''simple docstring'''
import functools
def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]):
# Validation
if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days):
raise ValueError("""The parameter days should be a list of integers""")
if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs):
raise ValueError("""The parameter costs should be a list of three integers""")
if len(lowerCamelCase) == 0:
return 0
if min(lowerCamelCase) <= 0:
raise ValueError("""All days elements should be greater than 0""")
if max(lowerCamelCase) >= 366:
raise ValueError("""All days elements should be less than 366""")
A_ : Tuple = set(lowerCamelCase)
@functools.cache
def dynamic_programming(lowerCamelCase : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
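The memoised recursion in the row above walks the calendar day by day: non-travel days are free, and each travel day takes the cheapest of a 1-, 7-, or 30-day pass starting then. A readable restatement with a worked input; the function name and the example days/prices (the classic LeetCode 983 case) are mine:

from __future__ import annotations
import functools

def mincost_tickets(days: list[int], costs: list[int]) -> int:
    days_set = set(days)

    @functools.cache
    def dp(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dp(index + 1)          # no travel today: move on for free
        return min(
            costs[0] + dp(index + 1),     # 1-day pass
            costs[1] + dp(index + 7),     # 7-day pass
            costs[2] + dp(index + 30),    # 30-day pass
        )

    return dp(1)

# 1-day pass on day 1, 7-day pass covering days 4-8, 1-day pass on day 20:
print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11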
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCamelCase ( lowerCamelCase : NDArray[floataa] , lowerCamelCase : NDArray[floataa] , lowerCamelCase : list[int] , lowerCamelCase : int , ):
A_ , A_ : int = coefficient_matrix.shape
A_ , A_ : Union[str, Any] = constant_matrix.shape
if rowsa != colsa:
A_ : Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if colsa != 1:
A_ : Tuple = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if rowsa != rowsa:
A_ : Dict = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(lowerCamelCase)
if len(lowerCamelCase) != rowsa:
A_ : Union[str, Any] = (
"""Number of initial values must be equal to number of rows in coefficient """
F'matrix but received {len(lowerCamelCase)} and {rowsa}'
)
raise ValueError(lowerCamelCase)
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""")
A_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
A_ , A_ : int = table.shape
strictly_diagonally_dominant(lowerCamelCase)
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase):
A_ : List[Any] = []
for row in range(lowerCamelCase):
A_ : int = 0
for col in range(lowerCamelCase):
if col == row:
A_ : List[str] = table[row][col]
elif col == cols - 1:
A_ : str = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ : Union[str, Any] = (temp + val) / denom
new_val.append(lowerCamelCase)
A_ : Tuple = new_val
return [float(lowerCamelCase) for i in new_val]
def lowerCamelCase ( lowerCamelCase : NDArray[floataa]):
A_ , A_ : Dict = table.shape
A_ : Union[str, Any] = True
for i in range(0 , lowerCamelCase):
A_ : str = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
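The first routine in the row above is the Jacobi iterative method: every sweep recomputes each unknown from the previous iterate using its own row, and the dominance check guarantees convergence. A vectorised sketch on an illustrative 3x3 system, with a direct solve as a cross-check:

import numpy as np

# Strictly diagonally dominant system (|diagonal| > sum of the rest per row):
#   4x +  y +  z =  2
#    x + 5y + 2z = -6
#    x + 2y + 4z = -4
A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([2.0, -6.0, -4.0])

d = np.diag(A)
x = np.zeros(3)
for _ in range(50):                  # fixed sweep count, as in the snippet
    x = (b - (A @ x - d * x)) / d    # each unknown from the previous iterate

print(np.round(x, 6))
print(np.linalg.solve(A, b))         # direct solution for comparison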
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = JukeboxTokenizer
a_ = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def _a ( self : Tuple ):
'''simple docstring'''
import torch
A_ : Any = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
A_ : Any = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
A_ : Union[str, Any] = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
@require_torch
def _a ( self : Dict ):
'''simple docstring'''
import torch
A_ : Any = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
A_ : Optional[Any] = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
A_ : Union[str, Any] = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Any = len(lowerCamelCase)
A_ : Optional[Any] = len(lowerCamelCase)
A_ : Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)]
A_ : Union[str, Any] = True
for i in range(lowerCamelCase):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A_ : Optional[int] = True
if a[i].islower():
A_ : List[Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
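The DP in the row above answers HackerRank's "Abbreviation" problem: can string a become string b by uppercasing some of its lowercase letters and deleting the remaining lowercase ones? A commented restatement with a worked pair; the name can_abbreviate is mine:

def can_abbreviate(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    # dp[i][j]: the first i chars of a can become the first j chars of b.
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True   # uppercase a[i] and match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True       # delete the lowercase a[i]
    return dp[n][m]

print(can_abbreviate("daBcd", "ABC"))  # True: delete d, uppercase a, keep B, uppercase c, delete d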
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_neox"""
def __init__( self : str ,_a : str=50432 ,_a : Optional[int]=6144 ,_a : List[str]=44 ,_a : List[Any]=64 ,_a : Tuple=24576 ,_a : str="gelu" ,_a : Any=0.25 ,_a : Any=10000 ,_a : str=0.0 ,_a : Optional[Any]=0.0 ,_a : str=0.1 ,_a : Optional[Any]=2048 ,_a : List[Any]=0.02 ,_a : Any=1e-5 ,_a : str=True ,_a : Optional[Any]=0 ,_a : Tuple=2 ,_a : List[str]=False ,_a : List[str]=True ,_a : int=None ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : List[Any] = vocab_size
A_ : Any = max_position_embeddings
A_ : str = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Any = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Tuple = rotary_pct
A_ : Optional[int] = rotary_emb_base
A_ : List[Any] = attention_dropout
A_ : List[Any] = hidden_dropout
A_ : Tuple = classifier_dropout
A_ : Any = initializer_range
A_ : Dict = layer_norm_eps
A_ : int = use_cache
A_ : Optional[int] = tie_word_embeddings
A_ : Tuple = use_parallel_residual
A_ : int = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def _a ( self : Any ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_a ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
f'got {self.rope_scaling}' )
A_ : Union[str, Any] = self.rope_scaling.get("""type""" ,_a )
A_ : Dict = self.rope_scaling.get("""factor""" ,_a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(_a ,_a ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 665 |
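The validator in the row above accepts only rope_scaling dictionaries carrying exactly a type in {"linear", "dynamic"} and a numeric factor greater than 1. A sketch of what passes and fails, assuming the mangled class keeps its public name GPTNeoXConfig and that the keyword survives the constructor (both assumptions on my part):

GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})    # accepted
GPTNeoXConfig(rope_scaling={"type": "linear"})                   # ValueError: needs two fields
GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})     # ValueError: unknown type
GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 0.5})   # ValueError: factor must exceed 1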
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
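The deque discipline in the row above is 0-1 BFS: zero-weight edges push their destination to the front of the queue and unit-weight edges to the back, keeping the deque ordered by distance and finding shortest paths in O(V + E) without a heap. A usage sketch; AdjacencyList, add_edge, and distance are assumed pre-mangling names for the class and methods above:

g = AdjacencyList(5)     # hypothetical public names; the snippet's are mangled
g.add_edge(0, 1, 0)      # free edge
g.add_edge(1, 2, 1)
g.add_edge(2, 4, 0)      # free edge
g.add_edge(0, 3, 1)
g.add_edge(3, 4, 1)
print(g.distance(0, 4))  # 1: the 0 -> 1 -> 2 -> 4 route pays for only one edge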
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
if b == 0:
return (1, 0)
((A_) , (A_)) : Dict = extended_euclid(lowerCamelCase , a % b)
A_ : Optional[Any] = a // b
return (y, x - k * y)
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int):
((A_) , (A_)) : Optional[Any] = extended_euclid(lowerCamelCase , lowerCamelCase)
A_ : Union[str, Any] = na * na
A_ : str = ra * x * na + ra * y * na
return (n % m + m) % m
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
((A_) , (A_)) : str = extended_euclid(lowerCamelCase , lowerCamelCase)
if b < 0:
A_ : Union[str, Any] = (b % n + n) % n
return b
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int):
A_ , A_ : Optional[int] = invert_modulo(lowerCamelCase , lowerCamelCase), invert_modulo(lowerCamelCase , lowerCamelCase)
A_ : Union[str, Any] = na * na
A_ : Optional[Any] = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
| 665 |
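Both CRT helpers in the row above solve the pair of congruences x ≡ r1 (mod n1), x ≡ r2 (mod n2) for coprime moduli by combining a Bezout identity from the extended Euclidean algorithm. A worked instance with small numbers of my own choosing:

from __future__ import annotations

def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (p, q) with p*a + q*b == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    x, y = extended_euclid(b, a % b)
    return (y, x - (a // b) * y)

def crt(r1: int, n1: int, r2: int, n2: int) -> int:
    x, y = extended_euclid(n1, n2)     # x*n1 + y*n2 == 1 for coprime moduli
    n = r2 * x * n1 + r1 * y * n2      # hits r1 mod n1 and r2 mod n2
    return (n % (n1 * n2) + n1 * n2) % (n1 * n2)

print(crt(2, 3, 3, 5))  # 8: indeed 8 % 3 == 2 and 8 % 5 == 3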
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 | 1 |
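The loop in the row above enumerates Project Euler 94's "almost equilateral" triangles, sides (a, a, a±1) with integral area; consecutive solutions obey a Pell-style recurrence, which is why two running values suffice. A brute-force cross-check for a small perimeter cap, slow but transparently correct (Heron's formula kept in integers):

from math import isqrt

def brute_force(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):                 # third side differs by exactly one
            perimeter = 2 * a + b
            if b < 1 or perimeter > max_perimeter:
                continue
            # Heron: 16 * area^2 = (2a + b)(2a - b) * b^2; the area is integral
            # exactly when this is a perfect square whose root is divisible by 4.
            val = (2 * a + b) * (2 * a - b) * b * b
            root = isqrt(val)
            if root * root == val and root % 4 == 0:
                total += perimeter
    return total

print(brute_force(100))  # 66 = 16 + 50, matching solution(100) above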
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """efficientformer"""
def __init__( self : Dict ,_a : List[int] = [3, 2, 6, 4] ,_a : List[int] = [48, 96, 224, 448] ,_a : List[bool] = [True, True, True, True] ,_a : int = 448 ,_a : int = 32 ,_a : int = 4 ,_a : int = 7 ,_a : int = 5 ,_a : int = 8 ,_a : int = 4 ,_a : float = 0.0 ,_a : int = 16 ,_a : int = 3 ,_a : int = 3 ,_a : int = 3 ,_a : int = 2 ,_a : int = 1 ,_a : float = 0.0 ,_a : int = 1 ,_a : bool = True ,_a : bool = True ,_a : float = 1e-5 ,_a : str = "gelu" ,_a : float = 0.02 ,_a : float = 1e-12 ,_a : int = 224 ,_a : float = 1e-05 ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Optional[int] = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : List[str] = hidden_sizes
A_ : List[Any] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Dict = initializer_range
A_ : int = layer_norm_eps
A_ : Optional[Any] = patch_size
A_ : str = num_channels
A_ : Dict = depths
A_ : str = mlp_expansion_ratio
A_ : str = downsamples
A_ : List[str] = dim
A_ : List[Any] = key_dim
A_ : int = attention_ratio
A_ : Any = resolution
A_ : int = pool_size
A_ : Dict = downsample_patch_size
A_ : List[Any] = downsample_stride
A_ : str = downsample_pad
A_ : List[str] = drop_path_rate
A_ : Union[str, Any] = num_metaad_blocks
A_ : Any = distillation
A_ : Dict = use_layer_scale
A_ : Optional[Any] = layer_scale_init_value
A_ : Optional[Any] = image_size
A_ : str = batch_norm_eps
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : list[list[int]] = []
create_all_state(1 , lowerCamelCase , lowerCamelCase , [] , lowerCamelCase)
return result
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : list[int] , lowerCamelCase : list[list[int]] , ):
if level == 0:
total_list.append(current_list[:])
return
for i in range(lowerCamelCase , total_number - level + 2):
current_list.append(lowerCamelCase)
create_all_state(i + 1 , lowerCamelCase , level - 1 , lowerCamelCase , lowerCamelCase)
current_list.pop()
def lowerCamelCase ( lowerCamelCase : list[list[int]]):
for i in total_list:
print(*lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = 4
__magic_name__ = 2
__magic_name__ = generate_all_combinations(n, k)
print_all_state(total_list)
| 665 |
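The recursion in the row above is classic choose-recurse-unchoose backtracking for the k-combinations of 1..n, emitted in lexicographic order. That ordering matches the standard library, which gives a one-line cross-check (reusing the generate_all_combinations name the demo at the bottom of the row already calls):

from itertools import combinations

n, k = 4, 2
assert generate_all_combinations(n, k) == [list(c) for c in combinations(range(1, n + 1), k)]
# Both yield: [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]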
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
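The try/except ladder in the row above is the transformers lazy-module pattern: _import_structure advertises every public symbol up front, while _LazyModule defers the heavy imports until an attribute is first accessed. A toy version of the same idea using module-level __getattr__ (PEP 562); the package layout and symbol map are illustrative:

# toy_pkg/__init__.py -- illustrative module and symbol names
import importlib

_LAZY = {
    "AltCLIPProcessor": ".processing_altclip",   # symbol -> submodule
    "AltCLIPModel": ".modeling_altclip",
}

def __getattr__(name):
    if name in _LAZY:
        # Import the submodule only now, on first access.
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")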
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = 0.0
for i, j in zip(_a ,_a ):
n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0
A_ : List[str] = n_correct / len(_a )
return {
"accuracy": accuracy,
}
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__magic_name__ = datasets.load_iris()
__magic_name__ = np.array(data['data'])
__magic_name__ = np.array(data['target'])
__magic_name__ = data['target_names']
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = train_test_split(X, y)
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[str]):
return np.linalg.norm(np.array(lowerCamelCase) - np.array(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple=5):
A_ : Tuple = zip(lowerCamelCase , lowerCamelCase)
# List of distances of all points from the point to be classified
A_ : int = []
for data_point in data:
A_ : Union[str, Any] = euclidean_distance(data_point[0] , lowerCamelCase)
distances.append((distance, data_point[1]))
# Choosing 'k' points with the least distances.
A_ : Dict = [i[1] for i in sorted(lowerCamelCase)[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : List[Any] = Counter(lowerCamelCase).most_common(1)[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 665 |
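The classifier in the row above is plain k-nearest-neighbours: rank the training points by Euclidean distance to the query and let the k closest vote. The same vote in a few lines over toy 1-D data; all values are illustrative:

from collections import Counter

train = [([1.0], "a"), ([1.2], "a"), ([5.0], "b"), ([5.1], "b"), ([5.3], "b")]
query = [4.8]

nearest = sorted(train, key=lambda point: abs(point[0][0] - query[0]))[:3]
votes = Counter(label for _, label in nearest)
print(votes.most_common(1)[0][0])  # 'b': the three closest points all carry label 'b'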
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float , ):
A_ : str = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters):
raise ValueError("""All input parameters must be non-negative""")
if any(p > 1 for p in parameters[1:4]):
raise ValueError("""Relative densities cannot be greater than one""")
else:
A_ : Any = 1 - (matter_density + radiation_density + dark_energy)
A_ : Any = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
A_ : Union[str, Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__magic_name__ = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 665 |
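Written out, the quantity the function in the row above assembles is the Friedmann expansion rate for a universe with radiation, matter, curvature, and dark-energy components:

H(z) = H_0 \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda},
\qquad \Omega_k = 1 - \Omega_r - \Omega_m - \Omega_\Lambda .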
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : str = [0] * len(lowerCamelCase)
A_ : Union[str, Any] = []
A_ : Union[str, Any] = []
A_ : Tuple = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCamelCase)):
if indegree[i] == 0:
queue.append(lowerCamelCase)
while queue:
A_ : Any = queue.pop(0)
cnt += 1
topo.append(lowerCamelCase)
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCamelCase)
if cnt != len(lowerCamelCase):
print("""Cycle exists""")
else:
print(lowerCamelCase)
# Adjacency List of Graph
__magic_name__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 665 | 1 |
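Kahn's algorithm in the row above doubles as a cycle detector: if the counter never reaches the vertex count, some vertices stayed blocked behind a cycle and never hit indegree zero. A tiny demonstration, reusing the topological_sort name from the call at the bottom of the row:

# Every vertex in a 3-cycle keeps indegree 1, so the queue starts empty
# and cnt stays 0 != 3.
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"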
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """pixel_values"""
a_ = False
a_ = TimmBackboneConfig
def __init__( self : Dict ,_a : Tuple ,**_a : str ):
'''simple docstring'''
requires_backends(self ,"""timm""" )
super().__init__(_a )
A_ : List[str] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
if hasattr(_a ,"""out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
A_ : str = getattr(_a ,"""use_pretrained_backbone""" ,_a )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
A_ : Any = config.out_indices if getattr(_a ,"""out_indices""" ,_a ) is not None else (-1,)
A_ : Union[str, Any] = timm.create_model(
config.backbone ,pretrained=_a ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=_a ,**_a ,)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
A_ : Dict = self._backbone.return_layers
A_ : Union[str, Any] = {layer["""module"""]: str(_a ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(_a )
@classmethod
def _a ( cls : Tuple ,_a : Dict ,*_a : Any ,**_a : Tuple ):
'''simple docstring'''
requires_backends(cls ,["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
A_ : List[str] = kwargs.pop("""config""" ,TimmBackboneConfig() )
A_ : Any = kwargs.pop("""use_timm_backbone""" ,_a )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
A_ : Dict = kwargs.pop("""num_channels""" ,config.num_channels )
A_ : Any = kwargs.pop("""features_only""" ,config.features_only )
A_ : Tuple = kwargs.pop("""use_pretrained_backbone""" ,config.use_pretrained_backbone )
A_ : Union[str, Any] = kwargs.pop("""out_indices""" ,config.out_indices )
A_ : Dict = TimmBackboneConfig(
backbone=_a ,num_channels=_a ,features_only=_a ,use_pretrained_backbone=_a ,out_indices=_a ,)
return super()._from_config(_a ,**_a )
def _a ( self : Union[str, Any] ,_a : Dict ):
'''simple docstring'''
pass
def _a ( self : List[str] ,_a : int ,_a : int=None ,_a : List[Any]=None ,_a : int=None ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
A_ : Optional[int] = self._all_layers
A_ : Tuple = self._backbone(_a ,**_a )
A_ : Any = self._return_layers
A_ : Optional[Any] = tuple(hidden_states[i] for i in self.out_indices )
else:
A_ : Union[str, Any] = self._backbone(_a ,**_a )
A_ : List[Any] = None
A_ : List[str] = tuple(_a )
A_ : int = tuple(_a ) if hidden_states is not None else None
if not return_dict:
A_ : int = (feature_maps,)
if output_hidden_states:
A_ : Dict = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_a ,hidden_states=_a ,attentions=_a )
| 665 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ , A_ , A_ , A_ : Any = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
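# --- hedged sketch of what rope_scaling={"type": "linear", "factor": 10.0} does
# conceptually: positions are divided by the factor before computing the rotary
# angles, stretching the usable context (illustrative, not the HF implementation) ---
import torch

positions = torch.arange(8, dtype=torch.float32)
factor = 10.0
scaled_positions = positions / factor  # linear scaling: angle inputs shrink 10x
print(scaled_positions[:4])  # tensor([0.0000, 0.1000, 0.2000, 0.3000])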
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
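# --- hedged illustration, not part of the test suite above ---
# The integration tests compare model outputs to golden tensors with
# torch.testing.assert_close, which passes when
# |observed - expected| <= atol + rtol * |expected| elementwise.
# Synthetic stand-in tensors below; no Llama checkpoint is involved.
import torch

expected = torch.tensor([-6.6550, -4.1227, -4.9859])
observed = expected + 1e-3  # comfortably inside the atol=1e-2 / rtol=1e-2 budget
torch.testing.assert_close(observed, expected, atol=1e-2, rtol=1e-2)  # raises only on mismatch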
| 665 | 1 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self : int ,*,
_a : int = 4 ,_a : int = 768 ,_a : int ,_a : Union[str, Any] ,):
'''simple docstring'''
super().__init__()
A_ : List[Any] = nn.Parameter(torch.zeros(_a ) )
# parameters for additional clip time embeddings
A_ : List[str] = nn.Linear(_a ,_a )
A_ : int = nn.Linear(_a ,_a )
# parameters for encoder hidden states
A_ : Any = clip_extra_context_tokens
A_ : List[str] = nn.Linear(
_a ,self.clip_extra_context_tokens * cross_attention_dim )
A_ : Optional[int] = nn.Linear(_a ,_a )
A_ : Tuple = nn.LayerNorm(_a )
def _a ( self : str ,*, _a : Tuple ,_a : List[str] ,_a : Dict ,_a : int ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
A_ : str = image_embeddings.shape[0]
A_ : Tuple = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
A_ : List[str] = classifier_free_guidance_embeddings.expand(
_a ,-1 )
A_ : Optional[int] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] ,dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
A_ : Any = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
A_ : Dict = self.embedding_proj(_a )
A_ : Any = self.clip_image_embeddings_project_to_time_embeddings(_a )
A_ : Union[str, Any] = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
A_ : Optional[Any] = self.clip_extra_context_tokens_proj(_a )
A_ : Tuple = clip_extra_context_tokens.reshape(_a ,-1 ,self.clip_extra_context_tokens )
A_ : Union[str, Any] = clip_extra_context_tokens.permute(0 ,2 ,1 )
A_ : str = self.encoder_hidden_states_proj(_a )
A_ : Union[str, Any] = self.text_encoder_hidden_states_norm(_a )
A_ : Tuple = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] ,dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
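# --- hedged shape sketch with illustrative dimensions, not a real checkpoint ---
# The module above projects a CLIP image embedding into clip_extra_context_tokens
# extra tokens of width cross_attention_dim via reshape + permute:
import torch
from torch import nn

batch, clip_dim, n_tokens, xattn_dim = 2, 512, 4, 768
proj = nn.Linear(clip_dim, n_tokens * xattn_dim)
tokens = proj(torch.randn(batch, clip_dim)).reshape(batch, -1, n_tokens).permute(0, 2, 1)
print(tokens.shape)  # torch.Size([2, 4, 768]) -> four extra context tokens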
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = 0.0
for i, j in zip(_a ,_a ):
n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0
A_ : List[str] = n_correct / len(_a )
return {
"accuracy": accuracy,
}
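# --- hedged sketch of the accuracy loop in _compute above ---
# math_equivalence.is_equiv is swapped for plain string equality so the snippet
# runs without the hendrycks/math dependency; the real metric canonicalizes
# LaTeX first (e.g. "1/2" matches "\\frac{1}{2}").
def sketch_accuracy(predictions, references):
    n_correct = sum(1.0 if p == r else 0.0 for p, r in zip(predictions, references))
    return n_correct / len(predictions)

print(sketch_accuracy(["1/2", "2"], ["1/2", "3"]))  # 0.5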
| 665 | 1 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
'''simple docstring'''
pass
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
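# --- hedged usage sketch; the upstream class name RetriBertConfig is assumed ---
# Instantiating the config with its defaults and one override:
# config = RetriBertConfig(projection_dim=256)
# assert config.share_encoders and config.projection_dim == 256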
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
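# --- hedged toy equivalent of the lazy-import trick above, via PEP 562; this is
# a stand-in, not the transformers _LazyModule implementation ---
# _LazyModule defers the heavy submodule imports until an attribute is first touched.
import importlib
import types


def make_lazy(module_name):
    mod = types.ModuleType("lazy_" + module_name)

    def __getattr__(name):  # PEP 562: consulted only when normal lookup fails
        return getattr(importlib.import_module(module_name), name)

    mod.__getattr__ = __getattr__
    return mod


lazy_json = make_lazy("json")
print(lazy_json.dumps({"a": 1}))  # "json" is imported only at this first access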
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behaves like a normal word, i.e. includes the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
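# --- hedged layout sketch with toy ids (cls=101, sep=102), not the BigBird vocab ---
# build_inputs_with_special_tokens above emits [CLS] A [SEP] for single sequences
# and [CLS] A [SEP] B [SEP] for pairs:
cls_id, sep_id = [101], [102]
seq_a, seq_b = [7, 8, 9], [4, 5]
print(cls_id + seq_a + sep_id)                   # [101, 7, 8, 9, 102]
print(cls_id + seq_a + sep_id + seq_b + sep_id)  # [101, 7, 8, 9, 102, 4, 5, 102]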
| 665 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__magic_name__ = {
'openbmb/cpm-ant-10b': 1_024,
}
def load_vocab(vocab_file):
vocab = collections.OrderedDict()
with open(vocab_file , """r""" , encoding="""utf-8""") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("""\n""")
vocab[token] = index
return vocab
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : List[str] ,_a : Union[str, Any]="<unk>" ,_a : List[str]=200 ):
'''simple docstring'''
A_ : Optional[Any] = vocab
A_ : str = unk_token
A_ : List[str] = max_input_chars_per_word
def _a ( self : Any ,_a : List[str] ):
'''simple docstring'''
A_ : List[Any] = list(_a )
if len(_a ) > self.max_input_chars_per_word:
return [self.unk_token]
A_ : List[str] = 0
A_ : Optional[Any] = []
while start < len(_a ):
A_ : int = len(_a )
A_ : int = None
while start < end:
A_ : Union[str, Any] = """""".join(chars[start:end] )
if substr in self.vocab:
A_ : Optional[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_a )
A_ : str = end
return sub_tokens
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = False
def __init__( self : Optional[int] ,_a : str ,_a : List[Any]="<d>" ,_a : str="</d>" ,_a : int="<s>" ,_a : List[Any]="</s>" ,_a : Union[str, Any]="<pad>" ,_a : Tuple="<unk>" ,_a : str="</n>" ,_a : Dict="</_>" ,_a : Optional[int]="left" ,**_a : Optional[int] ,):
'''simple docstring'''
requires_backends(self ,["""jieba"""] )
super().__init__(
bod_token=_a ,eod_token=_a ,bos_token=_a ,eos_token=_a ,pad_token=_a ,unk_token=_a ,line_token=_a ,space_token=_a ,padding_side=_a ,**_a ,)
A_ : List[str] = bod_token
A_ : List[str] = eod_token
A_ : Optional[int] = load_vocab(_a )
A_ : Optional[int] = self.encoder[space_token]
A_ : Optional[int] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A_ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda x : x[1] ) )
A_ : Dict = {v: k for k, v in self.encoder.items()}
A_ : Dict = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : List[Any] ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : Union[str, Any] ,_a : Dict ):
'''simple docstring'''
A_ : Tuple = []
for x in jieba.cut(_a ,cut_all=_a ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_a ) )
return output_tokens
def _a ( self : Any ,_a : Dict ,**_a : Union[str, Any] ):
'''simple docstring'''
A_ : int = [i for i in token_ids if i >= 0]
A_ : str = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_a ,**_a )
def _a ( self : int ,_a : List[Any] ):
'''simple docstring'''
return token in self.encoder
def _a ( self : Optional[int] ,_a : List[str] ):
'''simple docstring'''
return "".join(_a )
def _a ( self : Any ,_a : List[str] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : Tuple ,_a : List[Any] ):
'''simple docstring'''
return self.decoder.get(_a ,self.unk_token )
def _a ( self : str ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if os.path.isdir(_a ):
A_ : Union[str, Any] = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
A_ : Tuple = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
A_ : Any = 0
if " " in self.encoder:
A_ : Dict = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
A_ : List[str] = self.encoder["""\n"""]
del self.encoder["\n"]
A_ : List[str] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda x : x[1] ) )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
A_ : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : List[int] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a ))
return [1] + ([0] * len(_a ))
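# --- standalone sketch of the greedy longest-match loop in
# WordpieceTokenizer.tokenize above, with a toy vocabulary ---
def greedy_tokenize(text, vocab, unk="<unk>"):
    start, out = 0, []
    while start < len(text):
        end = len(text)
        match = None
        while start < end:  # shrink the window until a vocabulary hit
            if text[start:end] in vocab:
                match = text[start:end]
                break
            end -= 1
        if match is None:  # no prefix matched: emit <unk> and advance one char
            out.append(unk)
            start += 1
        else:
            out.append(match)
            start = end
    return out


print(greedy_tokenize("unhappy", {"un", "happy"}))  # ['un', 'happy']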
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
| 665 | 1 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = 50 # max width of layer names
__magic_name__ = 70 # max width of quantizer names
def lowerCamelCase ( lowerCamelCase : Any):
A_ : Optional[int] = parser.add_argument_group("""quant_trainer arguments""")
group.add_argument("""--wprec""" , type=lowerCamelCase , default=8 , help="""weight precision""")
group.add_argument("""--aprec""" , type=lowerCamelCase , default=8 , help="""activation precision""")
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""")
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""")
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""")
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase , nargs="""+""" , help="""disable quantizers by keyword""")
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase , help="""disable quantizers by keyword under layer.""")
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase , help="""enable quantizers by keyword under layer""")
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""")
group.add_argument("""--percentile""" , default=lowerCamelCase , type=lowerCamelCase , help="""percentile for PercentileCalibrator""")
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""")
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase , help="""clip gelu output maximum value to N""")
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def lowerCamelCase ( lowerCamelCase : Tuple):
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""")
A_ : Dict = """histogram"""
elif args.calibrator == "mse":
A_ : Union[str, Any] = """histogram"""
else:
raise ValueError(F'Invalid calibrator {args.calibrator}')
A_ : str = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase)
A_ : Tuple = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)))
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase)
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : str=False , lowerCamelCase : Any=False):
logger.info("""Configuring Model for Quantization""")
logger.info(F'using quantization package {pytorch_quantization.__file__}')
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase)
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase , [""""""] , _disabled=lowerCamelCase)
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase , args.quant_disable_keyword , _disabled=lowerCamelCase)
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase)
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase)
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase)
if args.fuse_qkv:
fuse_qkv(lowerCamelCase , lowerCamelCase)
if args.clip_gelu:
clip_gelu(lowerCamelCase , args.clip_gelu)
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Dict):
logger.info("""Enabling Calibration""")
for name, module in model.named_modules():
if name.endswith("""_quantizer"""):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'{name:80}: {module}')
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Union[str, Any]):
logger.info("""Loading calibrated amax""")
for name, module in model.named_modules():
if name.endswith("""_quantizer"""):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile)
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Dict):
def fusea(lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : str):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase , """_amax"""):
print(""" WARNING: NO AMAX BUFFER""")
return
A_ : Dict = qq._amax.detach().item()
A_ : int = qk._amax.detach().item()
A_ : Union[str, Any] = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase , lowerCamelCase , lowerCamelCase)
qq._amax.fill_(lowerCamelCase)
qk._amax.fill_(lowerCamelCase)
qv._amax.fill_(lowerCamelCase)
logger.info(F' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}')
for name, mod in model.named_modules():
if name.endswith(""".attention.self"""):
logger.info(F'FUSE_QKV: {name:{name_width}}')
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer)
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer)
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any]):
for name, mod in model.named_modules():
if name.endswith(""".output.dense""") and not name.endswith("""attention.output.dense"""):
A_ : Union[str, Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase)
A_ : Union[str, Any] = mod._input_quantizer._amax.data.detach().item()
logger.info(F'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}')
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase , """_weight_quantizer""") and mod._weight_quantizer.axis is not None:
A_ : List[str] = mod.weight.shape[0]
A_ : Tuple = mod._weight_quantizer._amax.detach()
A_ : int = torch.ones(lowerCamelCase , dtype=amax.dtype , device=amax.device) * amax
print(F'expanding {name} {amax} -> {mod._weight_quantizer._amax}')
def lowerCamelCase ( lowerCamelCase : List[str]):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase , """_weight_quantizer"""):
if not hasattr(mod._weight_quantizer , """_amax"""):
print(F'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER')
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
A_ : int = set(range(len(mod.weight.size()))) - axis_set
A_ : Any = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase , keepdims=lowerCamelCase).detach()
logger.info(F'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}')
A_ : int = amax
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict=25 , lowerCamelCase : Union[str, Any]=180 , lowerCamelCase : int=None):
if ignore is None:
A_ : List[Any] = []
elif not isinstance(lowerCamelCase , lowerCamelCase):
A_ : Optional[Any] = [ignore]
A_ : Optional[int] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase , """weight"""):
continue
A_ : str = max(lowerCamelCase , len(lowerCamelCase))
for name, mod in model.named_modules():
A_ : Any = getattr(lowerCamelCase , """_input_quantizer""" , lowerCamelCase)
A_ : Optional[int] = getattr(lowerCamelCase , """_weight_quantizer""" , lowerCamelCase)
if not hasattr(lowerCamelCase , """weight"""):
continue
if type(lowerCamelCase) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase) is str and s in name]:
continue
A_ : Optional[Any] = F'Act:{input_q.extra_repr()}'
A_ : List[str] = F'Wgt:{weight_q.extra_repr()}'
A_ : Tuple = F'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase) <= line_width:
logger.info(lowerCamelCase)
else:
logger.info(F'{name:{name_width}} {act_str}')
logger.info(F'{" ":{name_width}} {wgt_str}')
def lowerCamelCase ( lowerCamelCase : int):
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase , pytorch_quantization.nn.TensorQuantizer):
print(F'{name:80} {mod}')
count += 1
print(F'{count} TensorQuantizers found in model')
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int]):
A_ : Optional[Any] = getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase)
if quantizer_mod is not None:
assert hasattr(lowerCamelCase , lowerCamelCase)
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase)
else:
logger.warning(F'{name} has no {quantizer}')
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any]="both" , **lowerCamelCase : Tuple):
A_ : Dict = F'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase , lowerCamelCase , """_input_quantizer""" , lowerCamelCase , lowerCamelCase)
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase , lowerCamelCase , """_weight_quantizer""" , lowerCamelCase , lowerCamelCase)
logger.info(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , **lowerCamelCase : Tuple):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase , """_input_quantizer""") or hasattr(lowerCamelCase , """_weight_quantizer"""):
for n in names:
if re.search(lowerCamelCase , lowerCamelCase):
set_quantizers(lowerCamelCase , lowerCamelCase , **lowerCamelCase)
elif name.endswith("""_quantizer"""):
for n in names:
if re.search(lowerCamelCase , lowerCamelCase):
A_ : Optional[Any] = F'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase)
logger.info(lowerCamelCase)
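# --- hedged end-to-end sketch of the intended calibration flow; the helper
# names follow the upstream quant_trainer module, and `args`, `model`, and
# `calib_loader` are assumed to exist ---
# set_default_quantizers(args)              # pick calibrators / QuantDescriptors
# configure_model(model, args, calib=True)  # wire quantizers for calibration
# enable_calibration(model)                 # start collecting amax statistics...
# for batch in calib_loader:
#     model(**batch)                        # ...over representative data
# finish_calibration(model, args)           # load amaxes, re-enable quantization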
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
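# --- hedged toy re-implementation of the attribute_map aliasing used above,
# not the transformers PretrainedConfig itself ---
class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd=768):
        self.n_embd = n_embd

    def __getattr__(self, name):  # called only when normal lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


print(AliasedConfig().hidden_size)  # 768, resolved through the alias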
| 665 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
__magic_name__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__magic_name__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """whisper"""
a_ = ["""past_key_values"""]
a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str ,_a : Tuple=51865 ,_a : Dict=80 ,_a : int=6 ,_a : str=4 ,_a : Union[str, Any]=6 ,_a : Tuple=4 ,_a : str=1536 ,_a : int=1536 ,_a : Dict=0.0 ,_a : List[Any]=0.0 ,_a : List[str]=50257 ,_a : Tuple=True ,_a : Tuple=True ,_a : Tuple="gelu" ,_a : List[Any]=256 ,_a : Optional[int]=0.0 ,_a : List[Any]=0.0 ,_a : Optional[Any]=0.0 ,_a : str=0.02 ,_a : Optional[int]=False ,_a : List[str]=1500 ,_a : Tuple=448 ,_a : List[str]=50256 ,_a : Dict=50256 ,_a : Dict=50256 ,_a : Tuple=None ,_a : Any=[220, 50256] ,_a : Dict=False ,_a : Any=256 ,_a : Optional[int]=False ,_a : Optional[Any]=0.05 ,_a : Union[str, Any]=10 ,_a : List[str]=2 ,_a : str=0.0 ,_a : Dict=10 ,_a : Optional[Any]=0 ,_a : Any=7 ,**_a : Tuple ,):
'''simple docstring'''
A_ : List[Any] = vocab_size
A_ : int = num_mel_bins
A_ : Optional[int] = d_model
A_ : List[str] = encoder_layers
A_ : Optional[Any] = encoder_attention_heads
A_ : List[Any] = decoder_layers
A_ : Optional[Any] = decoder_attention_heads
A_ : List[Any] = decoder_ffn_dim
A_ : Union[str, Any] = encoder_ffn_dim
A_ : Optional[Any] = dropout
A_ : Union[str, Any] = attention_dropout
A_ : Any = activation_dropout
A_ : Optional[Any] = activation_function
A_ : List[str] = init_std
A_ : Dict = encoder_layerdrop
A_ : Dict = decoder_layerdrop
A_ : Any = use_cache
A_ : List[Any] = encoder_layers
A_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Tuple = max_source_positions
A_ : Dict = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
A_ : List[str] = classifier_proj_size
A_ : Tuple = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : Tuple = apply_spec_augment
A_ : Union[str, Any] = mask_time_prob
A_ : Optional[int] = mask_time_length
A_ : int = mask_time_min_masks
A_ : Optional[int] = mask_feature_prob
A_ : Optional[int] = mask_feature_length
A_ : Any = mask_feature_min_masks
A_ : Any = median_filter_width
super().__init__(
pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,decoder_start_token_id=_a ,suppress_tokens=_a ,begin_suppress_tokens=_a ,**_a ,)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[str] = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
A_ : Optional[int] = {0: """batch"""}
else:
A_ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_a ,direction="""inputs""" )
return common_inputs
def _a ( self : List[str] ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 22050 ,_a : float = 5.0 ,_a : int = 220 ,):
'''simple docstring'''
A_ : Tuple = OrderedDict()
A_ : List[str] = OnnxConfig.generate_dummy_inputs(
self ,preprocessor=preprocessor.feature_extractor ,batch_size=_a ,framework=_a ,sampling_rate=_a ,time_duration=_a ,frequency=_a ,)
A_ : List[Any] = encoder_inputs["""input_features"""].shape[2]
A_ : Optional[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
A_ : Any = super().generate_dummy_inputs(
preprocessor.tokenizer ,_a ,_a ,_a ,_a )
A_ : Any = encoder_inputs.pop("""input_features""" )
A_ : Union[str, Any] = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
A_ : Any = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _a ( self : Dict ):
'''simple docstring'''
return 1e-3
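# --- hedged illustration of how the id lists above are used: suppressed token
# ids are forced to -inf in the logits before sampling (synthetic tensor) ---
import torch

logits = torch.randn(1, 51865)       # (batch, vocab_size)
suppress = torch.tensor([1, 2, 7])   # illustrative subset of the lists above
logits[:, suppress] = float("-inf")  # suppressed ids can never be chosen
print(torch.isinf(logits[0, suppress]).all())  # tensor(True)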
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
bs = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs , cs))
def get_pairs(word):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
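# Tiny worked example of the pair extraction above, which feeds the merge loop
# in `bpe` below (the word is a tuple of symbols):
# get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}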
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : _a[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
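# Hedged usage sketch (assumes the standard transformers API; upstream this class is
# LongformerTokenizer, and the checkpoint id comes from the vocab map above):
#
#     from transformers import LongformerTokenizer
#     tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#     enc = tok("Hello world")
#     # enc["input_ids"] starts with the <s> (cls) id and ends with </s> (sep), exactly
#     # as the special-tokens builder above constructs single-sequence inputs.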
| 665 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """pegasus"""
a_ = ["""past_key_values"""]
a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Dict ,_a : Any=50265 ,_a : List[Any]=1024 ,_a : Optional[Any]=12 ,_a : int=4096 ,_a : List[Any]=16 ,_a : Optional[int]=12 ,_a : int=4096 ,_a : Dict=16 ,_a : Optional[int]=0.0 ,_a : Optional[Any]=0.0 ,_a : str=True ,_a : Tuple=True ,_a : List[Any]="gelu" ,_a : Tuple=1024 ,_a : Union[str, Any]=0.1 ,_a : List[str]=0.0 ,_a : Dict=0.0 ,_a : Dict=0.02 ,_a : List[str]=0 ,_a : Union[str, Any]=False ,_a : Tuple=0 ,_a : List[Any]=1 ,_a : int=1 ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Union[str, Any] = vocab_size
A_ : Optional[int] = max_position_embeddings
A_ : str = d_model
A_ : List[Any] = encoder_ffn_dim
A_ : int = encoder_layers
A_ : List[Any] = encoder_attention_heads
A_ : List[Any] = decoder_ffn_dim
A_ : int = decoder_layers
A_ : Any = decoder_attention_heads
A_ : Optional[Any] = dropout
A_ : Optional[Any] = attention_dropout
A_ : Optional[int] = activation_dropout
A_ : Optional[Any] = activation_function
A_ : Union[str, Any] = init_std
A_ : Tuple = encoder_layerdrop
A_ : Optional[Any] = decoder_layerdrop
A_ : List[Any] = use_cache
A_ : List[str] = encoder_layers
A_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,decoder_start_token_id=_a ,forced_eos_token_id=_a ,**_a ,)
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _a ( self : int ):
'''simple docstring'''
return self.d_model
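# Illustrative sketch (upstream this class is PegasusConfig): the attribute_map above
# aliases the generic names onto the encoder-specific fields, so with defaults:
#
#     cfg = PegasusConfig()
#     assert cfg.num_attention_heads == cfg.encoder_attention_heads == 16
#     assert cfg.hidden_size == cfg.d_model == 1024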
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
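# Hedged usage sketch (standard fast-tokenizer API; upstream this class is
# ConvBertTokenizerFast, and the ids come from the maps above):
#
#     from transformers import ConvBertTokenizerFast
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tok("first segment", "second segment")
#     # enc["token_type_ids"] is 0 over [CLS] first [SEP] and 1 over second [SEP], as
#     # computed by the token-type builder above.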
| 665 | 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
A_ : Optional[int] = []
for part_id in partition_order:
A_ : Union[str, Any] = df.where(F'SPARK_PARTITION_ID() = {part_id}').collect()
for row_idx, row in enumerate(lowerCamelCase):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()))
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : int = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : Union[str, Any] = spark.range(100).repartition(1)
A_ : Dict = Spark(lowerCamelCase)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16)
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : Optional[int] = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : Optional[int] = spark.range(10).repartition(2)
A_ : Optional[Any] = [1, 0]
A_ : Any = _generate_iterable_examples(lowerCamelCase , lowerCamelCase) # Reverse the partitions.
A_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , lowerCamelCase)
for i, (row_id, row_dict) in enumerate(generate_fn()):
A_ , A_ : Optional[int] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : Optional[int] = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : List[Any] = spark.range(10).repartition(1)
A_ : str = SparkExamplesIterable(lowerCamelCase)
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowerCamelCase):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : List[str] = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : str = spark.range(30).repartition(3)
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""") as generator_mock:
A_ : Optional[int] = lambda x: x.reverse()
A_ : Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [2, 1, 0])
A_ : str = SparkExamplesIterable(lowerCamelCase).shuffle_data_sources(lowerCamelCase)
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowerCamelCase):
A_ , A_ : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : str = spark.range(20).repartition(4)
# Partitions 0 and 2
A_ : Dict = SparkExamplesIterable(lowerCamelCase).shard_data_sources(worker_id=0 , num_workers=2)
assert shard_it_a.n_shards == 2
A_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [0, 2])
for i, (row_id, row_dict) in enumerate(lowerCamelCase):
A_ , A_ : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A_ : Optional[int] = SparkExamplesIterable(lowerCamelCase).shard_data_sources(worker_id=1 , num_workers=2)
assert shard_it_a.n_shards == 2
A_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [1, 3])
for i, (row_id, row_dict) in enumerate(lowerCamelCase):
A_ , A_ : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : List[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : Any = spark.range(100).repartition(1)
A_ : str = Spark(lowerCamelCase)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1)
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
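# Minimal sketch (illustration only) of the round-robin shard assignment the sharding
# tests above expect: worker i of num_workers takes partitions i, i + num_workers, ...
def _worker_partitions(num_partitions, worker_id, num_workers):
    return list(range(worker_id, num_partitions, num_workers))

assert _worker_partitions(4, worker_id=0, num_workers=2) == [0, 2]
assert _worker_partitions(4, worker_id=1, num_workers=2) == [1, 3]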
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
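# Hedged usage sketch (upstream this class is BartTokenizerFast; ids from the maps above):
#
#     from transformers import BartTokenizerFast
#     tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     single = tok("A sentence.")["input_ids"]   # <s> ... </s>
#     pair = tok("One.", "Two.")["input_ids"]    # <s> ... </s> </s> ... </s>
#     # the pair layout matches the special-tokens builder above.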
| 665 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
__magic_name__ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__magic_name__ = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__magic_name__ = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/jitsi/jiwer/"""] ,reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] ,)
def _a ( self : Dict ,_a : str=None ,_a : Union[str, Any]=None ,_a : List[str]=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(_a ,_a )["wer"]
else:
A_ : Tuple = 0
A_ : str = 0
for prediction, reference in zip(_a ,_a ):
A_ : Dict = compute_measures(_a ,_a )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
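# Worked example (added for illustration) of the iterative branch above, using the
# docstring's pairs. "this is the prediction" vs reference "this is the reference":
# S=1, D=0, I=0, hits=3. "there is an other sample" vs reference "there is another one":
# S=2, D=0, I=1, hits=2. Pooled over both pairs:
#     WER = (S + D + I) / (S + D + C) = (3 + 0 + 1) / (3 + 0 + 5) = 4 / 8 = 0.5
# which matches the docstring's expected score.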
| 665 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
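# Illustrative invocation (script name and paths are placeholders, not from the original
# file); note that vocab.txt is expected next to the checkpoint, since the tokenizer
# above is built from tf_checkpoint_path with the trailing "model.ckpt" stripped:
#
#     python convert_tapas_checkpoint.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path /path/to/output_dir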
| 665 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
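# Hedged note (illustration; the module path is an assumption): with the lazy structure
# above, importing the package is cheap and torch is only pulled in when the model class
# is actually touched.
#
#     from transformers.models.timm_backbone import TimmBackboneConfig  # no torch import
#     from transformers.models.timm_backbone import TimmBackbone        # triggers the torch-gated import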
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ):
'''simple docstring'''
A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xa / sin(theta )
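# Self-contained sketch (illustration only; reuses this file's torch/acos/sin imports)
# of the spherical interpolation in the static method above, with the two inputs kept
# distinct: alpha=0 returns the first tensor, alpha=1 the second, and intermediate
# values stay on the arc between them.
def _slerp_sketch(x_start, x_end, alpha):
    theta = acos(torch.dot(torch.flatten(x_start), torch.flatten(x_end)) / torch.norm(x_start) / torch.norm(x_end))
    return sin((1 - alpha) * theta) * x_start / sin(theta) + sin(alpha * theta) * x_end / sin(theta)

_xs, _xe = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
assert torch.allclose(_slerp_sketch(_xs, _xe, 0.0), _xs, atol=1e-5)
assert torch.allclose(_slerp_sketch(_xs, _xe, 0.5), torch.tensor([0.70710678, 0.70710678]), atol=1e-5)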
| 665 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and attention mask
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ , A_ , A_ , A_ : Any = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
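# Hedged sketch (config-level only; the rope_scaling field name follows the scaling
# dict exercised by the parameterized test above):
#
#     from transformers import LlamaConfig, LlamaModel
#     cfg = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})
#     model = LlamaModel(cfg)
#     # with "dynamic" scaling, short inputs match the unscaled model; inputs longer
#     # than max_position_embeddings diverge, as the test asserts.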
| 665 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 665 | 1 |
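For context, the gradient-accumulation logic in the script above keeps the per-device batch within hardware limits while preserving the requested effective batch size. A tiny arithmetic sketch; the constant value 16 is an assumed stand-in for the script's module-level MAX_GPU_BATCH_SIZE:

MAX_GPU_BATCH_SIZE = 16  # assumption mirroring the module-level constant
requested_batch_size = 64
gradient_accumulation_steps = requested_batch_size // MAX_GPU_BATCH_SIZE  # 4
print(f"per-step batch: {MAX_GPU_BATCH_SIZE}, effective batch: {MAX_GPU_BATCH_SIZE * gradient_accumulation_steps}")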
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
| 665 |
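A minimal round-trip check for the Playfair functions above (assumes encode/decode are in scope; note that prepare_input upper-cases the text, drops non-letters, and X-pads, so the decoded text is the normalized plaintext rather than the raw input):

ciphertext = encode("Hide the gold in the tree stump", "playfair example")
print(ciphertext)
print(decode(ciphertext, "playfair example"))  # normalized, X-padded plaintext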
import functools


def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    """
    Return the minimum cost to travel on every day in `days`, given the
    prices of 1-day, 7-day and 30-day passes in `costs`.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1), costs[1] + dynamic_programming(index + 7), costs[2] + dynamic_programming(index + 30), )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 | 1 |
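Two quick checks for the memoized recursion above, using the standard travel-days examples (expected minimum costs are 11 and 17):

print(minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11
print(minimum_tickets_cost([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]))  # 17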
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers-cli."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.", )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels." )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts." )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids." )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)." )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.", )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on." )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model." )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 665 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int, ) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 | 1 |
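A small worked example for the solver above, on a strictly diagonally dominant 3x3 system (the numbers here are illustrative, not from the file):

import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))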
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict ,_a : Any ,_a : Optional[int]=12 ,_a : Tuple=7 ,_a : Dict=True ,_a : Tuple=True ,_a : int=True ,_a : List[Any]=99 ,_a : Dict=32 ,_a : int=32 ,_a : Any=2 ,_a : Any=4 ,_a : Optional[Any]=37 ,_a : Dict=0.1 ,_a : Tuple=0.1 ,_a : Tuple=512 ,_a : List[str]=0.02 ,_a : List[Any]=0 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : Optional[int] = parent
A_ : Any = batch_size
A_ : Any = seq_length
A_ : Optional[Any] = is_training
A_ : List[str] = use_input_mask
A_ : Union[str, Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Optional[int] = hidden_size
A_ : List[str] = projection_dim
A_ : List[Any] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : List[Any] = dropout
A_ : int = attention_dropout
A_ : str = max_position_embeddings
A_ : Dict = initializer_range
A_ : List[str] = scope
A_ : List[Any] = bos_token_id
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A_ : Optional[int] = input_mask.numpy()
A_ , A_ : Tuple = input_mask.shape
A_ : Tuple = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(_a ):
A_ : Tuple = 1
A_ : int = 0
A_ : Optional[Any] = self.get_config()
return config, input_ids, tf.convert_to_tensor(_a )
def _a ( self : str ):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def _a ( self : Dict ,_a : Tuple ,_a : Optional[Any] ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = TFBlipTextModel(config=_a )
A_ : List[str] = model(_a ,attention_mask=_a ,training=_a )
A_ : Dict = model(_a ,training=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _a ( self : Any ):
'''simple docstring'''
A_ : str = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (TFBlipTextModel,) if is_tf_available() else ()
a_ = False
a_ = False
a_ = False
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Dict = BlipTextModelTester(self )
A_ : str = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : str ):
'''simple docstring'''
pass
def _a ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def _a ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _a ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = TFBlipTextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _a ( self : str ,_a : Any=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=_a )
| 665 |
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be abbreviated to string `b` by up-casing
    some of its lowercase letters and deleting the remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 | 1 |
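Quick checks for the DP above: "daBcd" abbreviates to "ABC" (capitalize a and c, delete the lowercase d's), while "dBcd" does not:

print(abbr("daBcd", "ABC"))  # True
print(abbr("dBcd", "ABC"))  # False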
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 665 |
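The tokenizer above mirrors the SentencePiece-backed BigBird tokenizer published in transformers; a minimal usage sketch against that public API (downloads the vocabulary on first run):

from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
ids = tokenizer('BigBird uses a SentencePiece vocabulary.').input_ids
print(tokenizer.decode(ids))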
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS, where every edge weight is 0 or 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # Zero-weight edges keep the deque sorted by distance when pushed to the front.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 | 1 |
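A usage sketch for the 0-1 BFS class above: zero-weight edges go to the front of the deque via appendleft, so vertices are popped in nondecreasing distance order without a priority queue:

g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 3, 1)
g.add_edge(3, 2, 0)
print(g.get_shortest_path(0, 2))  # 1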
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__magic_name__ = 'CompVis/stable-diffusion-v1-1'
__magic_name__ = 'CompVis/stable-diffusion-v1-2'
__magic_name__ = 'CompVis/stable-diffusion-v1-3'
__magic_name__ = 'CompVis/stable-diffusion-v1-4'
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : CLIPTextModel ,_a : CLIPTokenizer ,_a : UNetaDConditionModel ,_a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,_a : StableDiffusionSafetyChecker ,_a : CLIPImageProcessor ,_a : bool = True ,):
'''simple docstring'''
        super().__init__()
A_ : Optional[int] = StableDiffusionPipeline.from_pretrained(_a )
A_ : Any = StableDiffusionPipeline.from_pretrained(_a )
A_ : int = StableDiffusionPipeline.from_pretrained(_a )
A_ : List[str] = StableDiffusionPipeline(
vae=_a ,text_encoder=_a ,tokenizer=_a ,unet=_a ,scheduler=_a ,safety_checker=_a ,feature_extractor=_a ,requires_safety_checker=_a ,)
self.register_modules(pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea )
@property
def _a ( self : int ):
'''simple docstring'''
return {k: getattr(self ,_a ) for k in self.config.keys() if not k.startswith("""_""" )}
def _a ( self : str ,_a : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _a ( self : Any ):
'''simple docstring'''
self.enable_attention_slicing(_a )
@torch.no_grad()
def _a ( self : List[Any] ,_a : Union[str, List[str]] ,_a : int = 512 ,_a : int = 512 ,_a : int = 50 ,_a : float = 7.5 ,_a : Optional[Union[str, List[str]]] = None ,_a : Optional[int] = 1 ,_a : float = 0.0 ,_a : Optional[torch.Generator] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,_a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,_a : int = 1 ,**_a : List[Any] ,):
'''simple docstring'''
return self.pipea(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
@torch.no_grad()
def _a ( self : Optional[int] ,_a : Union[str, List[str]] ,_a : int = 512 ,_a : int = 512 ,_a : int = 50 ,_a : float = 7.5 ,_a : Optional[Union[str, List[str]]] = None ,_a : Optional[int] = 1 ,_a : float = 0.0 ,_a : Optional[torch.Generator] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,_a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,_a : int = 1 ,**_a : List[Any] ,):
'''simple docstring'''
return self.pipea(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
@torch.no_grad()
def _a ( self : Tuple ,_a : Union[str, List[str]] ,_a : int = 512 ,_a : int = 512 ,_a : int = 50 ,_a : float = 7.5 ,_a : Optional[Union[str, List[str]]] = None ,_a : Optional[int] = 1 ,_a : float = 0.0 ,_a : Optional[torch.Generator] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,_a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,_a : int = 1 ,**_a : Union[str, Any] ,):
'''simple docstring'''
return self.pipea(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
@torch.no_grad()
def _a ( self : Tuple ,_a : Union[str, List[str]] ,_a : int = 512 ,_a : int = 512 ,_a : int = 50 ,_a : float = 7.5 ,_a : Optional[Union[str, List[str]]] = None ,_a : Optional[int] = 1 ,_a : float = 0.0 ,_a : Optional[torch.Generator] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,_a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,_a : int = 1 ,**_a : Union[str, Any] ,):
'''simple docstring'''
return self.pipea(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
@torch.no_grad()
def _a ( self : str ,_a : Union[str, List[str]] ,_a : int = 512 ,_a : int = 512 ,_a : int = 50 ,_a : float = 7.5 ,_a : Optional[Union[str, List[str]]] = None ,_a : Optional[int] = 1 ,_a : float = 0.0 ,_a : Optional[torch.Generator] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,_a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,_a : int = 1 ,**_a : Dict ,):
'''simple docstring'''
A_ : Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(_a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
# Get first result from Stable Diffusion Checkpoint v1.1
A_ : Dict = self.textaimg_sda_a(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
# Get first result from Stable Diffusion Checkpoint v1.2
A_ : Dict = self.textaimg_sda_a(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
# Get first result from Stable Diffusion Checkpoint v1.3
A_ : Optional[Any] = self.textaimg_sda_a(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
# Get first result from Stable Diffusion Checkpoint v1.4
A_ : List[str] = self.textaimg_sda_a(
prompt=_a ,height=_a ,width=_a ,num_inference_steps=_a ,guidance_scale=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,output_type=_a ,return_dict=_a ,callback=_a ,callback_steps=_a ,**_a ,)
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 665 |
def solution(max_perimeter: int = 10**9) -> int:
    """
    Project Euler 94: sum the perimeters of all almost-equilateral triangles
    (sides n, n, n +/- 1) with integral side lengths and integral area,
    whose perimeter does not exceed `max_perimeter`.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 665 | 1 |
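With a small bound the Pell-style recurrence is easy to follow by hand: the only qualifying triangles with perimeter at most 100 are 5-5-6 (perimeter 16) and 17-17-16 (perimeter 50):

print(solution(100))  # 16 + 50 = 66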
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : is an empirically determined constant in [0.04, 0.06]
        window_size : neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[int]]]:
        """
        Returns the image with corners identified and the list of corner positions.
        img_path : path of the image
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: hardcoded locally, shadowing self.k, as in the upstream implementation
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 665 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__magic_name__ = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__magic_name__ = {'mobilebert-uncased': 512}
__magic_name__ = {}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = MobileBertTokenizer
def __init__( self : str ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[Any]=True ,_a : Optional[Any]="[UNK]" ,_a : Dict="[SEP]" ,_a : int="[PAD]" ,_a : Optional[int]="[CLS]" ,_a : Any="[MASK]" ,_a : Any=True ,_a : Union[str, Any]=None ,**_a : int ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Tuple = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : int = do_lower_case
A_ : List[Any] = strip_accents
A_ : List[Any] = tokenize_chinese_chars
A_ : Dict = normalizer_class(**_a )
A_ : Optional[int] = do_lower_case
def _a ( self : List[str] ,_a : Any ,_a : List[Any]=None ):
'''simple docstring'''
A_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Optional[Any] = [self.sep_token_id]
A_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : List[str] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[str] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
| 665 |
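A minimal usage sketch against the public transformers API that the fast tokenizer above mirrors (fetches the vocabulary on first run):

from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained('google/mobilebert-uncased')
print(tokenizer('hello world').input_ids)  # ids for [CLS] hello world [SEP]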
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_altclip': [
        'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AltCLIPConfig',
        'AltCLIPTextConfig',
        'AltCLIPVisionConfig',
    ],
    'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
        'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AltCLIPPreTrainedModel',
        'AltCLIPModel',
        'AltCLIPTextModel',
        'AltCLIPVisionModel',
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (the height is dependent on the key) in a zigzag formation and reading it
    left to right.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key and fills it in with the
    characters of the input string and then reads it in a zigzag formation.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 |
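A round-trip check for the rail-fence functions above; bruteforce returns a dict of candidate plaintexts, and the entry at the true key matches the original message:

secret = encrypt("Hello, World!", 4)
print(secret)
print(decrypt(secret, 4))  # Hello, World!
print(bruteforce(secret)[4])  # Hello, World!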
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
        'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'YolosForObjectDetection',
        'YolosModel',
        'YolosPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int, ) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__magic_name__ = logging.getLogger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : Tuple=-1 ):
'''simple docstring'''
A_ : Tuple = label_idx
def _a ( self : Union[str, Any] ,_a : Any ,_a : Union[Split, str] ):
'''simple docstring'''
if isinstance(_a ,_a ):
A_ : Any = mode.value
A_ : Optional[int] = os.path.join(_a ,f'{mode}.txt' )
A_ : int = 1
A_ : Optional[Any] = []
with open(_a ,encoding="""utf-8""" ) as f:
A_ : Dict = []
A_ : List[str] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' ,words=_a ,labels=_a ) )
guid_index += 1
A_ : List[str] = []
A_ : Union[str, Any] = []
else:
A_ : List[str] = line.split(""" """ )
words.append(splits[0] )
if len(_a ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" ,"""""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' ,words=_a ,labels=_a ) )
return examples
def _a ( self : Optional[int] ,_a : TextIO ,_a : TextIO ,_a : List ):
'''simple docstring'''
A_ : Dict = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(_a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
A_ : Tuple = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(_a )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" ,line.split()[0] )
def _a ( self : int ,_a : str ):
'''simple docstring'''
if path:
with open(_a ,"""r""" ) as f:
A_ : Optional[Any] = f.read().splitlines()
if "O" not in labels:
A_ : Tuple = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def _a ( self : Dict ,_a : str ):
'''simple docstring'''
if path:
with open(_a ,"""r""" ) as f:
A_ : str = f.read().splitlines()
if "O" not in labels:
A_ : Dict = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : Any ,_a : int ,_a : Union[Split, str] ):
'''simple docstring'''
if isinstance(_a ,_a ):
A_ : List[str] = mode.value
A_ : Optional[Any] = os.path.join(_a ,f'{mode}.txt' )
A_ : Union[str, Any] = 1
A_ : str = []
with open(_a ,encoding="""utf-8""" ) as f:
for sentence in parse_incr(_a ):
A_ : Any = []
A_ : Optional[int] = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(_a ) == len(_a )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' ,words=_a ,labels=_a ) )
guid_index += 1
return examples
def _a ( self : Dict ,_a : TextIO ,_a : TextIO ,_a : List ):
'''simple docstring'''
A_ : Dict = 0
for sentence in parse_incr(_a ):
A_ : Tuple = preds_list[example_id]
A_ : Dict = """"""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(_a )
example_id += 1
def _a ( self : str ,_a : str ):
'''simple docstring'''
if path:
with open(_a ,"""r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 665 |
def topological_sort(graph):
    """Kahn's algorithm: BFS-based topological ordering of a directed acyclic
    graph given as an adjacency list; prints the ordering, or reports a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 665 | 1 |
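The cnt check in the function above is what detects cycles: vertices on a cycle never reach indegree 0, so fewer than len(graph) vertices are ever dequeued. For example:

cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"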
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[str] ,_a : List[str]=None ,_a : Optional[int]=None ,**_a : List[str] ):
'''simple docstring'''
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[Any] = kwargs.pop("""feature_extractor""" )
A_ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Dict = self.image_processor
def __call__( self : Tuple ,_a : List[str] ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Tuple = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : List[Any] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : int ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : str ,*_a : Union[str, Any] ,**_a : List[str] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Dict = self.tokenizer.model_input_names
A_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
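# Hedged usage sketch; the checkpoint name and image are illustrative and do not
# appear in the source. ProcessorMixin supplies from_pretrained, so something like:
#   processor = __lowerCAmelCase.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   inputs = processor(image, "How many cats are there?", return_tensors="pt")
# would tokenize the question and merge in pixel_values/pixel_mask from the image
# processor, as implemented in __call__ above.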
| 665 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ : Any = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
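        # Hedged reading, not from the source: "linear" scaling rescales position
        # ids at every length, so even short inputs diverge from the unscaled
        # model, while "dynamic" NTK scaling leaves RoPE untouched below the
        # original max length; that asymmetry drives the assertions above.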
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
    @unittest.skip(
        """Logits are not exactly the same; once we fix the instabilities, we will update! This is also going to be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConditionalDetrFeatureExtractor']
__magic_name__ = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
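    # Hedged note: in the upstream lazy-import pattern this _LazyModule instance
    # is bound into sys.modules, so the names listed in the import-structure dict
    # above are imported only on first attribute access; the TYPE_CHECKING branch
    # exists solely for static type checkers.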
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = 0.0
for i, j in zip(_a ,_a ):
n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0
A_ : List[str] = n_correct / len(_a )
return {
"accuracy": accuracy,
}
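# Hedged worked example (values chosen here, not in the source): with
# predictions=["1/2", "3"] and references=["\\frac{1}{2}", "4"], the first pair
# canonicalizes to a match and the second does not, giving accuracy = 0.5.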
| 665 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a ,"""hidden_sizes""" ) )
self.parent.assertTrue(hasattr(_a ,"""neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(_a ,"""num_attention_heads""" ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ,_a : List[str]=13 ,_a : Tuple=32 ,_a : Union[str, Any]=2 ,_a : Union[str, Any]=3 ,_a : Dict=640 ,_a : Any=4 ,_a : List[Any]="silu" ,_a : Union[str, Any]=3 ,_a : List[str]=32 ,_a : Union[str, Any]=0.1 ,_a : Any=0.1 ,_a : int=0.1 ,_a : str=0.02 ,_a : List[str]=True ,_a : Union[str, Any]=True ,_a : Optional[int]=10 ,_a : List[Any]=None ,):
'''simple docstring'''
A_ : int = parent
A_ : int = batch_size
A_ : Dict = image_size
A_ : Dict = patch_size
A_ : Dict = num_channels
A_ : str = last_hidden_size
A_ : Tuple = num_attention_heads
A_ : Any = hidden_act
A_ : Tuple = conv_kernel_size
A_ : Tuple = output_stride
A_ : List[Any] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : List[str] = classifier_dropout_prob
A_ : Tuple = use_labels
A_ : str = is_training
A_ : List[Any] = num_labels
A_ : int = initializer_range
A_ : Tuple = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : List[str] = None
A_ : Dict = None
if self.use_labels:
A_ : List[str] = ids_tensor([self.batch_size] ,self.num_labels )
A_ : int = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
A_ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def _a ( self : Optional[int] ,_a : Dict ,_a : Union[str, Any] ,_a : List[str] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = MobileViTModel(config=_a )
model.to(_a )
model.eval()
A_ : int = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _a ( self : Optional[Any] ,_a : str ,_a : int ,_a : Any ,_a : List[str] ):
'''simple docstring'''
A_ : Tuple = self.num_labels
A_ : Tuple = MobileViTForImageClassification(_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : str ,_a : List[str] ,_a : List[str] ,_a : List[Any] ,_a : int ):
'''simple docstring'''
A_ : Optional[Any] = self.num_labels
A_ : str = MobileViTForSemanticSegmentation(_a )
model.to(_a )
model.eval()
A_ : List[Any] = model(_a )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
A_ : Tuple = model(_a ,labels=_a )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Tuple = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ : Optional[int] = config_and_inputs
A_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
a_ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : List[str] = MobileViTModelTester(self )
A_ : Optional[int] = MobileViTConfigTester(self ,config_class=_a ,has_text_modality=_a )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def _a ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def _a ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def _a ( self : List[str] ):
'''simple docstring'''
pass
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Tuple = model_class(_a )
A_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Any = [*signature.parameters.keys()]
A_ : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_a )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : List[Any] ):
'''simple docstring'''
pass
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(_a : Any ,_a : Tuple ,_a : Optional[Any] ):
A_ : Dict = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ : str = model(**self._prepare_for_class(_a ,_a ) )
A_ : int = outputs.hidden_states
A_ : Any = 5
self.assertEqual(len(_a ) ,_a )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
A_ : Union[str, Any] = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
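            # Hedged arithmetic check: with the tester's image_size of 32, the five
            # stages above have 16, 8, 4, 2 and 1 pixels per side; the divisor ends
            # at 64, and 64 // 2 equals the configured output_stride of 32.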
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = True
check_hidden_states_output(_a ,_a ,_a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : int = True
check_hidden_states_output(_a ,_a ,_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def _a ( self : str ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def _a ( self : int ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = MobileViTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase ( ):
A_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self : List[str] ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(_a )
A_ : Dict = self.default_image_processor
A_ : Union[str, Any] = prepare_img()
A_ : int = image_processor(images=_a ,return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
A_ : str = model(**_a )
# verify the logits
A_ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
A_ : Optional[int] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : List[Any] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
A_ : Optional[int] = model.to(_a )
A_ : Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
A_ : List[str] = prepare_img()
A_ : List[str] = image_processor(images=_a ,return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
A_ : Optional[int] = model(**_a )
A_ : Optional[int] = outputs.logits
# verify the logits
A_ : Any = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,_a )
A_ : Any = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] ,device=_a ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
A_ : Tuple = model.to(_a )
A_ : Optional[int] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
A_ : int = prepare_img()
A_ : int = image_processor(images=_a ,return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
A_ : Optional[Any] = model(**_a )
A_ : Optional[int] = outputs.logits.detach().cpu()
A_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=_a ,target_sizes=[(50, 60)] )
A_ : Union[str, Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,_a )
A_ : Any = image_processor.post_process_semantic_segmentation(outputs=_a )
A_ : Optional[Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape ,_a )
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
| 665 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = FunnelTokenizer
a_ = FunnelTokenizerFast
a_ = True
a_ = True
def _a ( self : Dict ):
'''simple docstring'''
super().setUp()
A_ : Tuple = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _a ( self : Optional[int] ,**_a : str ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname ,**_a )
def _a ( self : Dict ,**_a : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname ,**_a )
def _a ( self : Dict ,_a : str ):
'''simple docstring'''
A_ : int = """UNwant\u00E9d,running"""
A_ : Optional[Any] = """unwanted, running"""
return input_text, output_text
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[Any] = self.tokenizer_class(self.vocab_file )
A_ : Union[str, Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_a ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,[7, 4, 5, 10, 8, 9] )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
A_ : Optional[Any] = tokenizer("""UNwant\u00E9d,running""" )
A_ : int = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] ,[2] + [0] * sentence_len )
A_ : List[str] = tokenizer("""UNwant\u00E9d,running""" ,"""UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] ,[2] + [0] * sentence_len + [1] * sentence_len )
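# Hedged note: to my knowledge Funnel's tokenizer sets cls_token_type_id = 2, so
# the leading [CLS] gets segment id 2 while ordinary tokens get 0 or 1; that is
# exactly what the expected token_type_ids lists above encode.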
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
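# Hedged illustration of the special-token layout implemented above:
#   single sequence: [CLS] A [SEP]         -> token_type_ids are all 0
#   sequence pair:   [CLS] A [SEP] B [SEP] -> 0s through the first [SEP], then 1s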
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""")
A_ : int = str(bin(lowerCamelCase))
binary_number += "0" * shift_amount
return binary_number
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""")
A_ : Any = str(bin(lowerCamelCase))[2:]
if shift_amount >= len(lowerCamelCase):
return "0b0"
A_ : Optional[int] = binary_number[: len(lowerCamelCase) - shift_amount]
return "0b" + shifted_binary_number
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
if number >= 0: # Get binary representation of positive number
A_ : Optional[int] = """0""" + str(bin(lowerCamelCase)).strip("""-""")[2:]
else: # Get binary (2's complement) representation of negative number
A_ : Optional[int] = len(bin(lowerCamelCase)[3:]) # Find 2's complement of number
A_ : Optional[Any] = bin(abs(lowerCamelCase) - (1 << binary_number_length))[3:]
A_ : int = (
"""1""" + """0""" * (binary_number_length - len(lowerCamelCase)) + binary_number
)
if shift_amount >= len(lowerCamelCase):
return "0b" + binary_number[0] * len(lowerCamelCase)
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(lowerCamelCase) - shift_amount]
)
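# Hedged examples (assumption: the three functions above implement, in order,
# logical left shift, logical right shift and arithmetic right shift; since all
# three share one name in this snippet, only the last binding survives at runtime):
#   logical left shift of 13 by 1       -> '0b11010'
#   logical right shift of 13 by 1      -> '0b110'
#   arithmetic right shift of -13 by 1  -> '0b11001'  (5-bit two's complement of -7)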
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
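        # Hedged note: with return_tensors=True the pipeline skips decoding and
        # yields raw "generated_token_ids" tensors, which is why the assertions
        # above match ANY(torch.Tensor) rather than strings.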
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
| 665 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """data2vec-vision"""
def __init__( self : str ,_a : Optional[Any]=768 ,_a : Optional[int]=12 ,_a : List[Any]=12 ,_a : str=3072 ,_a : Optional[int]="gelu" ,_a : List[str]=0.0 ,_a : List[str]=0.0 ,_a : List[str]=0.02 ,_a : List[Any]=1e-12 ,_a : Tuple=224 ,_a : Optional[Any]=16 ,_a : Union[str, Any]=3 ,_a : str=False ,_a : Tuple=False ,_a : Dict=False ,_a : int=False ,_a : Optional[int]=0.1 ,_a : str=0.1 ,_a : Union[str, Any]=True ,_a : str=[3, 5, 7, 11] ,_a : Optional[int]=[1, 2, 3, 6] ,_a : List[Any]=True ,_a : Any=0.4 ,_a : Optional[int]=256 ,_a : Union[str, Any]=1 ,_a : str=False ,_a : Optional[int]=255 ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Optional[Any] = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : List[str] = layer_norm_eps
A_ : int = image_size
A_ : Tuple = patch_size
A_ : Any = num_channels
A_ : Optional[Any] = use_mask_token
A_ : Union[str, Any] = use_absolute_position_embeddings
A_ : List[str] = use_relative_position_bias
A_ : Dict = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Optional[int] = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : List[Any] = out_indices
A_ : List[str] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : List[Any] = auxiliary_loss_weight
A_ : str = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : int = auxiliary_concat_input
A_ : str = semantic_loss_ignore_index
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = version.parse("""1.11""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
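# Hedged note: the OnnxConfig above declares the single "pixel_values" input with
# all four axes named as dynamic, and atol_for_validation (1e-4) is the absolute
# tolerance used when checking the exported ONNX graph against the PyTorch model.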
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
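# Hedged note (standard PretrainedConfig behavior): the attribute_map above
# aliases canonical names onto the GPT-2-style fields, so for a config `cfg`,
# cfg.hidden_size reads and writes cfg.n_embd and cfg.num_hidden_layers maps to
# cfg.n_layer.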
| 665 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """git_vision_model"""
def __init__( self : Union[str, Any] ,_a : List[Any]=768 ,_a : Optional[Any]=3072 ,_a : Optional[Any]=12 ,_a : Tuple=12 ,_a : Tuple=3 ,_a : Any=224 ,_a : List[str]=16 ,_a : List[Any]="quick_gelu" ,_a : Dict=1e-5 ,_a : Union[str, Any]=0.0 ,_a : List[str]=0.02 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Optional[int] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : List[Any] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Optional[Any] = num_channels
A_ : Optional[Any] = patch_size
A_ : int = image_size
A_ : str = initializer_range
A_ : Union[str, Any] = attention_dropout
A_ : List[Any] = layer_norm_eps
A_ : Tuple = hidden_act
@classmethod
def _a ( cls : Optional[int] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Any = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
A_ : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """git"""
def __init__( self : List[Any] ,_a : Union[str, Any]=None ,_a : Tuple=30522 ,_a : int=768 ,_a : int=6 ,_a : Optional[int]=12 ,_a : List[Any]=3072 ,_a : int="gelu" ,_a : str=0.1 ,_a : Union[str, Any]=0.1 ,_a : Union[str, Any]=1024 ,_a : Any=0.02 ,_a : Optional[Any]=1e-12 ,_a : Optional[int]=0 ,_a : List[Any]="absolute" ,_a : Optional[int]=True ,_a : Tuple=False ,_a : Optional[int]=101 ,_a : Dict=102 ,_a : Tuple=None ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(bos_token_id=_a ,eos_token_id=_a ,pad_token_id=_a ,**_a )
if vision_config is None:
A_ : Dict = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
A_ : List[Any] = GitVisionConfig(**_a )
A_ : int = vocab_size
A_ : Union[str, Any] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : str = num_attention_heads
A_ : List[str] = hidden_act
A_ : Any = intermediate_size
A_ : Any = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Dict = initializer_range
A_ : Optional[int] = layer_norm_eps
A_ : Tuple = position_embedding_type
A_ : str = use_cache
A_ : str = tie_word_embeddings
A_ : Dict = num_image_with_embedding
A_ : Optional[int] = bos_token_id
A_ : Dict = eos_token_id
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
A_ : Union[str, Any] = self.vision_config.to_dict()
A_ : Optional[Any] = self.__class__.model_type
return output
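# Hedged usage sketch (field values are illustrative only): a composite config can
# be built by passing a vision dict, e.g. vision_config=GitVisionConfig(image_size=384).to_dict();
# the to_dict override above re-embeds the nested vision config so the composite
# round-trips cleanly through JSON serialization.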
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase ( ):
A_ : Union[str, Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
A_ : Optional[Any] = bs[:]
A_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
A_ : List[Any] = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase))
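# Illustrative reconstruction (editor addition, assuming the standard GPT-2
# construction): the function above builds a byte-to-unicode table mapping every
# byte to a printable character so BPE can operate on visible symbols. A
# self-contained, runnable version:
def _demo_bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))
# e.g. _demo_bytes_to_unicode()[ord(" ")] == "Ġ"  (space maps to a printable glyph)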
def lowerCamelCase ( lowerCamelCase : int):
A_ : int = set()
A_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : List[str] = char
return pairs
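# Illustrative sketch (editor addition): the helper above collects adjacent symbol
# pairs, i.e. the merge candidates for BPE. A one-line equivalent:
def _demo_get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}
# _demo_get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}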
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
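# Hypothetical usage sketch (editor addition; checkpoint name taken from the URL
# map above, the wrapping behavior from build_inputs_with_special_tokens):
def _demo_longformer_tokenizer_usage():
    from transformers import LongformerTokenizer
    tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    return tok("Hello world")["input_ids"]  # ids wrapped as <s> ... </s>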
| 665 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : Optional[int] ,_a : str=7 ,_a : Optional[Any]=3 ,_a : List[Any]=30 ,_a : str=400 ,_a : Tuple=True ,_a : Union[str, Any]=None ,_a : str=True ,_a : List[str]=[0.5, 0.5, 0.5] ,_a : List[str]=[0.5, 0.5, 0.5] ,_a : Optional[int]=True ,_a : Optional[int]=1 / 255 ,_a : Any=True ,):
'''simple docstring'''
A_ : Any = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
A_ : Optional[int] = parent
A_ : str = batch_size
A_ : Optional[Any] = num_channels
A_ : Optional[int] = min_resolution
A_ : List[str] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Tuple = size
A_ : Any = do_normalize
A_ : int = image_mean
A_ : Optional[int] = image_std
A_ : int = do_rescale
A_ : Optional[Any] = rescale_factor
A_ : Optional[Any] = do_pad
def _a ( self : List[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self : Union[str, Any] ,_a : Optional[int] ,_a : Tuple=False ):
'''simple docstring'''
if not batched:
A_ : Optional[Any] = image_inputs[0]
if isinstance(_a ,Image.Image ):
A_ , A_ : List[Any] = image.size
else:
A_ , A_ : Dict = image.shape[1], image.shape[2]
if w < h:
A_ : List[Any] = int(self.size["""shortest_edge"""] * h / w )
A_ : str = self.size["""shortest_edge"""]
elif w > h:
A_ : Optional[int] = self.size["""shortest_edge"""]
A_ : Optional[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
A_ : str = self.size["""shortest_edge"""]
A_ : Union[str, Any] = self.size["""shortest_edge"""]
else:
A_ : Any = []
for image in image_inputs:
A_ , A_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : List[str] = max(_a ,key=lambda _a : item[0] )[0]
A_ : Tuple = max(_a ,key=lambda _a : item[1] )[1]
return expected_height, expected_width
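# Illustrative arithmetic (editor addition): the shortest-edge resize rule the
# helper above implements, as a standalone function:
def _demo_shortest_edge_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge
# _demo_shortest_edge_resize(400, 30) == (240, 18)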
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Dict = ConditionalDetrImageProcessingTester(self )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a ,"""image_mean""" ) )
self.assertTrue(hasattr(_a ,"""image_std""" ) )
self.assertTrue(hasattr(_a ,"""do_normalize""" ) )
self.assertTrue(hasattr(_a ,"""do_resize""" ) )
self.assertTrue(hasattr(_a ,"""size""" ) )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad ,_a )
A_ : List[str] = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=_a )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad ,_a )
def _a ( self : Dict ):
'''simple docstring'''
pass
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a ,Image.Image )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
A_ , A_ : List[Any] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
A_ , A_ : str = self.image_processor_tester.get_expected_values(_a ,batched=_a )
A_ : Tuple = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def _a ( self : Any ):
'''simple docstring'''
A_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,np.ndarray )
# Test not batched input
A_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
A_ , A_ : Any = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
A_ : Any = image_processing(_a ,return_tensors="""pt""" ).pixel_values
A_ , A_ : List[Any] = self.image_processor_tester.get_expected_values(_a ,batched=_a )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
A_ , A_ : str = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
A_ : Dict = image_processing(_a ,return_tensors="""pt""" ).pixel_values
A_ , A_ : Union[str, Any] = self.image_processor_tester.get_expected_values(_a ,batched=_a )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" ,"""r""" ) as f:
A_ : str = json.loads(f.read() )
A_ : Any = {"""image_id""": 39769, """annotations""": target}
# encode them
A_ : Optional[Any] = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
A_ : Any = image_processing(images=_a ,annotations=_a ,return_tensors="""pt""" )
# verify pixel values
A_ : Optional[int] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape ,_a )
A_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,_a ,atol=1e-4 ) )
# verify area
A_ : Optional[int] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,_a ) )
# verify boxes
A_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,_a )
A_ : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,_a ,atol=1e-3 ) )
# verify image_id
A_ : List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,_a ) )
# verify is_crowd
A_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,_a ) )
# verify class_labels
A_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,_a ) )
# verify orig_size
A_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,_a ) )
# verify size
A_ : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,_a ) )
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" ,"""r""" ) as f:
A_ : int = json.loads(f.read() )
A_ : str = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
A_ : str = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A_ : str = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
A_ : str = image_processing(images=_a ,annotations=_a ,masks_path=_a ,return_tensors="""pt""" )
# verify pixel values
A_ : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape ,_a )
A_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,_a ,atol=1e-4 ) )
# verify area
A_ : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,_a ) )
# verify boxes
A_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,_a )
A_ : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,_a ,atol=1e-3 ) )
# verify image_id
A_ : Optional[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,_a ) )
# verify is_crowd
A_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,_a ) )
# verify class_labels
A_ : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,_a ) )
# verify masks
A_ : Dict = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() ,_a )
# verify orig_size
A_ : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,_a ) )
# verify size
A_ : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,_a ) )
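# Illustrative note (editor addition): the box tensors asserted above are in
# normalized (center_x, center_y, width, height) format, as is standard for
# DETR-style targets; converting one back to absolute corner coordinates:
def _demo_cxcywh_to_xyxy(box, img_h, img_w):
    cx, cy, w, h = box
    return ((cx - w / 2) * img_w, (cy - h / 2) * img_h,
            (cx + w / 2) * img_w, (cy + h / 2) * img_h)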
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
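# Illustrative sketch (editor addition): the token_type_ids rule implemented above,
# shown on toy ids (101/102 stand in for [CLS]/[SEP]):
def _demo_token_type_ids(ids_a, ids_b=None, cls=101, sep=102):
    first = len([cls] + ids_a + [sep]) * [0]
    if ids_b is None:
        return first
    return first + len(ids_b + [sep]) * [1]
# _demo_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]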
| 665 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__magic_name__ = None
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__magic_name__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
__magic_name__ = {
'facebook/nllb-large-en-ro': 1_024,
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
__magic_name__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = ["""input_ids""", """attention_mask"""]
a_ = NllbTokenizer
a_ = []
a_ = []
def __init__( self : Any ,_a : Dict=None ,_a : Optional[Any]=None ,_a : List[str]="<s>" ,_a : Union[str, Any]="</s>" ,_a : List[Any]="</s>" ,_a : int="<s>" ,_a : List[Any]="<unk>" ,_a : str="<pad>" ,_a : Tuple="<mask>" ,_a : List[str]=None ,_a : str=None ,_a : List[Any]=None ,_a : Dict=False ,**_a : int ,):
'''simple docstring'''
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Any = legacy_behaviour
super().__init__(
vocab_file=_a ,tokenizer_file=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,src_lang=_a ,tgt_lang=_a ,additional_special_tokens=_a ,legacy_behaviour=_a ,**_a ,)
A_ : List[str] = vocab_file
A_ : Any = False if not self.vocab_file else True
A_ : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
A_ : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ : Optional[int] = src_lang if src_lang is not None else """eng_Latn"""
A_ : Optional[int] = self.convert_tokens_to_ids(self._src_lang )
A_ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _a ( self : Optional[Any] ,_a : str ):
'''simple docstring'''
A_ : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Union[str, Any] ,_a : str ,_a : Optional[str] ,_a : Optional[str] ,**_a : Optional[Any] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
A_ : Optional[Any] = src_lang
A_ : Optional[int] = self(_a ,add_special_tokens=_a ,return_tensors=_a ,**_a )
A_ : Union[str, Any] = self.convert_tokens_to_ids(_a )
A_ : Dict = tgt_lang_id
return inputs
def _a ( self : Tuple ,_a : List[str] ,_a : str = "eng_Latn" ,_a : Optional[List[str]] = None ,_a : str = "fra_Latn" ,**_a : Tuple ,):
'''simple docstring'''
A_ : List[Any] = src_lang
A_ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(_a ,_a ,**_a )
def _a ( self : Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self : int ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self : Tuple ,_a : int ):
'''simple docstring'''
A_ : Optional[Any] = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
A_ : Dict = []
A_ : int = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Tuple = [self.cur_lang_code]
A_ : List[str] = [self.eos_token_id]
A_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str ,pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def _a ( self : List[str] ,_a : str ):
'''simple docstring'''
A_ : Any = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
A_ : List[Any] = []
A_ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Union[str, Any] = [self.cur_lang_code]
A_ : Tuple = [self.eos_token_id]
A_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str ,pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def _a ( self : List[str] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
A_ : Optional[Any] = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file ,_a )
return (out_vocab_file,)
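# Illustrative sketch (editor addition): the two special-token layouts toggled by
# `legacy_behaviour` in the set_*_lang_special_tokens methods above, on symbolic tokens:
def _demo_nllb_layout(tokens, lang_code="eng_Latn", eos="</s>", legacy=False):
    if legacy:
        return tokens + [eos, lang_code]  # legacy: language code in the suffix
    return [lang_code] + tokens + [eos]   # current: language code as the prefix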
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
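# Illustrative sketch (editor addition): the special-token layout produced by the
# methods above — <s> A </s> for single sequences, <s> A </s></s> B </s> for pairs:
def _demo_bart_special_tokens(ids_a, ids_b=None, bos=0, eos=2):
    out = [bos] + ids_a + [eos]
    if ids_b is None:
        return out
    return out + [eos] + ids_b + [eos]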
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
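# Illustrative sketch (editor addition): a minimal standalone equivalent of the
# _LazyModule pattern above, which defers importing heavy submodules until an
# attribute is first accessed:
import importlib
class _DemoLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        self._import_structure = import_structure
    def __getattr__(self, name):
        for submodule, names in self._import_structure.items():
            if name in names:
                return getattr(importlib.import_module(f"{self._package}.{submodule}"), name)
        raise AttributeError(name)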
| 665 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
        A_ : Any = 0.664694
        A_ : str = 0.207951
        A_ : Any = 0.121194
A_ : str = True
A_ : Dict = True
A_ : int = False
        A_ : int = 0.0352513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
        A_ : List[Any] = 0.903421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
        A_ : Optional[int] = 0.763141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 665 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : List[Any] ,_a : Any ):
'''simple docstring'''
if isinstance(_a ,_a ):
A_ : List[str] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self : Union[str, Any] ,_a : List[Any] ,_a : str ,_a : Optional[int] ):
'''simple docstring'''
if len(_a ) == 0 or len(_a ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(_a ) )
if isinstance(_a ,_a ):
A_ : List[Any] = [sequences]
A_ : Any = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(_a )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : str=ZeroShotClassificationArgumentHandler() ,*_a : Optional[int] ,**_a : Optional[Any] ):
'''simple docstring'''
A_ : int = args_parser
super().__init__(*_a ,**_a )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a ( self : Any ,_a : Dict ,_a : Optional[int]=True ,_a : Dict=True ,_a : str=TruncationStrategy.ONLY_FIRST ,**_a : int ):
'''simple docstring'''
A_ : str = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : Tuple = self.tokenizer.eos_token
try:
A_ : Union[str, Any] = self.tokenizer(
_a ,add_special_tokens=_a ,return_tensors=_a ,padding=_a ,truncation=_a ,)
except Exception as e:
if "too short" in str(_a ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
                    # There seems to be no cleaner way to catch that
                    # exception.
A_ : Any = self.tokenizer(
_a ,add_special_tokens=_a ,return_tensors=_a ,padding=_a ,truncation=TruncationStrategy.DO_NOT_TRUNCATE ,)
else:
raise e
return inputs
def _a ( self : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
if kwargs.get("""multi_class""" ,_a ) is not None:
A_ : List[Any] = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : List[str] = {}
if "candidate_labels" in kwargs:
A_ : int = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : Optional[Any] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Any = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : str ,_a : Union[str, List[str]] ,*_a : List[Any] ,**_a : Optional[int] ,):
'''simple docstring'''
if len(_a ) == 0:
pass
elif len(_a ) == 1 and "candidate_labels" not in kwargs:
A_ : List[Any] = args[0]
else:
raise ValueError(f'Unable to understand extra arguments {args}' )
return super().__call__(_a ,**_a )
def _a ( self : Tuple ,_a : Any ,_a : List[Any]=None ,_a : Any="This example is {}." ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self._args_parser(_a ,_a ,_a )
for i, (candidate_label, sequence_pair) in enumerate(zip(_a ,_a ) ):
A_ : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_a ) - 1,
**model_input,
}
def _a ( self : Any ,_a : str ):
'''simple docstring'''
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : Optional[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**_a )
A_ : Tuple = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a ( self : Dict ,_a : int ,_a : Tuple=False ):
'''simple docstring'''
A_ : List[str] = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Any = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Union[str, Any] = logits.shape[0]
A_ : Optional[int] = len(_a )
A_ : Optional[Any] = N // n
A_ : List[Any] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(_a ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Dict = self.entailment_id
A_ : List[Any] = -1 if entailment_id == 0 else 0
A_ : List[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : List[str] = np.exp(_a ) / np.exp(_a ).sum(-1 ,keepdims=_a )
A_ : Optional[int] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Dict = reshaped_outputs[..., self.entailment_id]
A_ : Optional[int] = np.exp(_a ) / np.exp(_a ).sum(-1 ,keepdims=_a )
A_ : Dict = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
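# Hypothetical usage sketch (editor addition; checkpoint name assumed, the default
# hypothesis template taken from the signature above):
def _demo_zero_shot_usage():
    from transformers import pipeline
    clf = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    return clf(
        "I love this new phone",
        candidate_labels=["technology", "sports"],
        hypothesis_template="This example is {}.",
    )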
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ):
'''simple docstring'''
A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
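# Illustrative sketch (editor addition): the static method above is spherical
# linear interpolation (slerp) between two noise tensors; a NumPy equivalent:
import numpy as np
def _demo_slerp(x0, x1, alpha):
    theta = np.arccos(
        np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    )
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)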
| 665 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """markuplm"""
def __init__( self : List[Any] ,_a : str=30522 ,_a : Union[str, Any]=768 ,_a : Dict=12 ,_a : Optional[int]=12 ,_a : Optional[Any]=3072 ,_a : Union[str, Any]="gelu" ,_a : Optional[int]=0.1 ,_a : int=0.1 ,_a : List[Any]=512 ,_a : str=2 ,_a : str=0.02 ,_a : int=1e-12 ,_a : Tuple=0 ,_a : List[Any]=0 ,_a : str=2 ,_a : Optional[Any]=256 ,_a : Optional[int]=1024 ,_a : Dict=216 ,_a : Optional[int]=1001 ,_a : Tuple=32 ,_a : List[Any]=50 ,_a : int="absolute" ,_a : Dict=True ,_a : Optional[Any]=None ,**_a : Union[str, Any] ,):
'''simple docstring'''
super().__init__(
pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ,)
A_ : Dict = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Dict = hidden_act
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Union[str, Any] = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Union[str, Any] = position_embedding_type
A_ : Any = use_cache
A_ : Optional[int] = classifier_dropout
# additional properties
A_ : List[Any] = max_depth
A_ : Any = max_xpath_tag_unit_embeddings
A_ : Tuple = max_xpath_subs_unit_embeddings
A_ : str = tag_pad_id
A_ : List[Any] = subs_pad_id
A_ : Optional[int] = xpath_unit_hidden_size
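# Hypothetical usage sketch (editor addition; public class name assumed): reading
# the XPath-specific fields set above from a default config:
def _demo_markuplm_config_fields():
    from transformers import MarkupLMConfig
    cfg = MarkupLMConfig()
    return cfg.max_depth, cfg.xpath_unit_hidden_size  # (50, 32) with the defaults above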
| 665 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
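
A sketch of driving the training entry point above directly (normally the script is run as `accelerate launch <script>.py --mixed_precision fp16`). `training_function` is the name the call in main() uses; the call below is left commented because it downloads GLUE MRPC and bert-base-cased:

from types import SimpleNamespace

args = SimpleNamespace(mixed_precision=None, cpu=True)
config = {"lr": 2e-5, "num_epochs": 1, "seed": 42, "batch_size": 16}
# training_function(config, args)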
| 665 | 1 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : int):
A_ : Tuple = int(lowerCamelCase)
assert noofclusters < len(lowerCamelCase)
# Find out the dimensionality
A_ : Any = len(vectors[0])
# Will help select random centroids from among the available vectors
A_ : Dict = list(range(len(lowerCamelCase)))
shuffle(lowerCamelCase)
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
A_ : Optional[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
A_ : Union[str, Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
A_ : str = [
tf.Variable(vectors[vector_indices[i]]) for i in range(lowerCamelCase)
]
##These nodes will assign the centroid Variables the appropriate
##values
A_ : Dict = tf.placeholder("""float64""" , [dim])
A_ : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowerCamelCase , lowerCamelCase))
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
A_ : Tuple = [tf.Variable(0) for i in range(len(lowerCamelCase))]
##These nodes will assign an assignment Variable the appropriate
##value
A_ : Optional[Any] = tf.placeholder("""int32""")
A_ : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowerCamelCase , lowerCamelCase))
##Now lets construct the node that will compute the mean
# The placeholder for the input
A_ : Dict = tf.placeholder("""float""" , [None, dim])
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
A_ : Optional[Any] = tf.reduce_mean(lowerCamelCase , 0)
##Node for computing Euclidean distances
# Placeholders for input
A_ : Optional[Any] = tf.placeholder("""float""" , [dim])
A_ : int = tf.placeholder("""float""" , [dim])
        A_ : int = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowerCamelCase , lowerCamelCase) , 2)))  # tf.sub was renamed to tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
A_ : List[str] = tf.placeholder("""float""" , [noofclusters])
A_ : int = tf.argmin(lowerCamelCase , 0)
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        A_ : int = tf.global_variables_initializer()  # replaces the long-deprecated tf.initialize_all_variables
# Initialize all variables
sess.run(lowerCamelCase)
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
A_ : Dict = 100
for _ in range(lowerCamelCase):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowerCamelCase)):
A_ : List[Any] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
A_ : int = [
                    sess.run(lowerCamelCase , feed_dict={va: vect, vb: sess.run(lowerCamelCase)})  # the two distance placeholders must be distinct dict keys, or one feed silently overwrites the other
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
A_ : Optional[int] = sess.run(
lowerCamelCase , feed_dict={centroid_distances: distances})
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment})
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowerCamelCase):
# Collect all the vectors assigned to this cluster
A_ : List[str] = [
vectors[i]
for i in range(len(lowerCamelCase))
if sess.run(assignments[i]) == cluster_n
]
# Compute new centroid location
A_ : str = sess.run(
lowerCamelCase , feed_dict={mean_input: array(lowerCamelCase)})
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location})
# Return centroids and assignments
A_ : List[str] = sess.run(lowerCamelCase)
A_ : Optional[int] = sess.run(lowerCamelCase)
return centroids, assignments
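
A sketch of how the clustering routine above is meant to be invoked. As printed, its body refers to identifiers (vectors, noofclusters, sess, centroids, ...) that the name mangling detached from their assignments, so the restored entry-point name below is an assumption, not guaranteed to run against the listing verbatim:

import numpy as np

toy_vectors = np.array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]], dtype=np.float64)
# centroids, assignments = tf_k_means_cluster(toy_vectors, 2)  # hypothetical restored name
# `assignments` should place the first two and last two vectors in separate clusters.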
| 665 |
'''simple docstring'''
import functools
def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]):
# Validation
if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days):
raise ValueError("""The parameter days should be a list of integers""")
if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs):
raise ValueError("""The parameter costs should be a list of three integers""")
if len(lowerCamelCase) == 0:
return 0
if min(lowerCamelCase) <= 0:
raise ValueError("""All days elements should be greater than 0""")
if max(lowerCamelCase) >= 366:
raise ValueError("""All days elements should be less than 366""")
A_ : Tuple = set(lowerCamelCase)
@functools.cache
def dynamic_programming(lowerCamelCase : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
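
A worked example for the routine above (the classic minimum-cost-for-tickets problem; the descriptive name is hypothetical since the listing's def is mangled):

# mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) -> 11
#   1-day pass on day 1 ($2), 7-day pass covering days 4-10 ($7),
#   1-day pass on day 20 ($2): 2 + 7 + 2 = 11.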
| 665 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
A_ : Any = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
A_ , A_ : Dict = emb.weight.shape
A_ : str = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase)
A_ : str = emb.weight.data
return lin_layer
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = torch.load(lowerCamelCase , map_location="""cpu""")
A_ : Optional[Any] = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
A_ : Optional[Any] = mam_aaa["""model"""]
remove_ignore_keys_(lowerCamelCase)
A_ : Optional[int] = state_dict["""encoder.embed_tokens.weight"""].shape[0]
A_ : int = MaMaaaConfig(
vocab_size=lowerCamelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
A_ : List[Any] = state_dict["""decoder.embed_tokens.weight"""]
A_ : Optional[int] = MaMaaaForConditionalGeneration(lowerCamelCase)
model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase)
A_ : Tuple = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__magic_name__ = parser.parse_args()
__magic_name__ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
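
The make_linear_from_emb helper above ties the decoder's output projection to the shared embedding matrix. A self-contained sketch of that weight-tying trick with toy shapes:

import torch
from torch import nn

emb = nn.Embedding(10, 4)           # (vocab_size, d_model)
lin = nn.Linear(4, 10, bias=False)  # maps hidden states back to vocab logits
lin.weight.data = emb.weight.data   # share one parameter tensor
x = torch.randn(4)
assert torch.allclose(lin(x), emb.weight @ x)  # each logit is a dot product with an embedding row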
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCamelCase ( lowerCamelCase : NDArray[floataa] , lowerCamelCase : NDArray[floataa] , lowerCamelCase : list[int] , lowerCamelCase : int , ):
A_ , A_ : int = coefficient_matrix.shape
A_ , A_ : Union[str, Any] = constant_matrix.shape
if rowsa != colsa:
A_ : Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if colsa != 1:
A_ : Tuple = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if rowsa != rowsa:
A_ : Dict = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(lowerCamelCase)
if len(lowerCamelCase) != rowsa:
A_ : Union[str, Any] = (
"""Number of initial values must be equal to number of rows in coefficient """
F'matrix but received {len(lowerCamelCase)} and {rowsa}'
)
raise ValueError(lowerCamelCase)
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""")
A_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
A_ , A_ : int = table.shape
strictly_diagonally_dominant(lowerCamelCase)
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase):
A_ : List[Any] = []
for row in range(lowerCamelCase):
A_ : int = 0
for col in range(lowerCamelCase):
if col == row:
A_ : List[str] = table[row][col]
elif col == cols - 1:
A_ : str = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ : Union[str, Any] = (temp + val) / denom
new_val.append(lowerCamelCase)
A_ : Tuple = new_val
return [float(lowerCamelCase) for i in new_val]
def lowerCamelCase ( lowerCamelCase : NDArray[floataa]):
A_ , A_ : Dict = table.shape
A_ : Union[str, Any] = True
for i in range(0 , lowerCamelCase):
A_ : str = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
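
A worked example for the iteration above. The body still calls strictly_diagonally_dominant, the original name of the second (mangled) def; the entry-point name below is likewise an assumed restoration, so the call is left commented. The system is strictly diagonally dominant, so Jacobi converges:

import numpy as np

coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[1.0], [2.0]])
# jacobi_iteration_method(coefficient, constant, [0, 0], 25)
# -> approximately [0.0909, 0.6364], the exact solution (1/11, 7/11)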
| 665 | 1 |
'''simple docstring'''
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : int = size
A_ : Dict = [0] * size
A_ : Any = [0] * size
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return (index & (index + 1)) - 1
def _a ( self : List[Any] ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = value
while index < self.size:
A_ : Optional[int] = self.get_prev(_a ) + 1
if current_left_border == index:
A_ : Dict = value
else:
A_ : Tuple = max(_a ,_a ,_a )
A_ : List[str] = self.get_next(_a )
def _a ( self : List[str] ,_a : int ,_a : int ):
'''simple docstring'''
right -= 1 # Because of right is exclusive
A_ : List[Any] = 0
while left <= right:
A_ : List[str] = self.get_prev(_a )
if left <= current_left:
A_ : int = max(_a ,self.tree[right] )
A_ : int = current_left
else:
A_ : str = max(_a ,self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
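
The class above is a Fenwick-style tree specialised for range-maximum queries: get_next jumps to the next node whose interval covers an updated index, get_prev to the preceding disjoint interval during a query. A usage sketch with hypothetical restored names (the listing's methods are all mangled to _a):

# tree = MaxFenwickTree(5)
# tree.update(2, 7)
# tree.update(4, 3)
# tree.query(0, 5)  # -> 7, the maximum over indices [0, 5) (right bound exclusive)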
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Any = len(lowerCamelCase)
A_ : Optional[Any] = len(lowerCamelCase)
A_ : Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)]
A_ : Union[str, Any] = True
for i in range(lowerCamelCase):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A_ : Optional[int] = True
if a[i].islower():
A_ : List[Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
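
A worked example for the dp above (the "abbreviation" problem: can a be turned into b by upper-casing some of its lowercase letters and deleting the remaining lowercase ones?). The restored name is hypothetical:

# abbreviation("daBcd", "ABC") -> True
#   upper-case 'a' and 'c', delete the leftover lowercase 'd's
# abbreviation("dBcd", "ABC")  -> False, nothing can produce the leading 'A'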
| 665 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase ( lowerCamelCase : int):
if not isinstance(lowerCamelCase , lowerCamelCase):
raise TypeError("""Undefined for non-integers""")
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""")
    getcontext().prec = precision  # raise Decimal working precision; the default 28 digits would truncate the result
A_ : Optional[int] = ceil(precision / 14)
A_ : Optional[int] = 42_6880 * Decimal(1_0005).sqrt()
A_ : int = 1
A_ : str = 1359_1409
A_ : int = Decimal(lowerCamelCase)
for k in range(1 , lowerCamelCase):
A_ : List[Any] = factorial(6 * k) // (factorial(3 * k) * factorial(lowerCamelCase) ** 3)
linear_term += 5_4514_0134
exponential_term *= -26_2537_4126_4076_8000
partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
__magic_name__ = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
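
The traversal above is the classic 0-1 BFS: a deque stands in for Dijkstra's priority queue, with weight-0 edges pushed to the front and weight-1 edges to the back, so vertices are still popped in nondecreasing distance order in O(V + E). A usage sketch with hypothetical restored names (the listing's methods are mangled to _a):

# g = AdjacencyList(3)
# g.add_edge(0, 1, 0)
# g.add_edge(1, 2, 1)
# g.get_shortest_path(0, 2)  # -> 1: the weight-0 edge costs nothing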
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = 42
a_ = 42
def lowerCamelCase ( lowerCamelCase : str):
if not isinstance(lowerCamelCase , lowerCamelCase):
raise TypeError("""The parameter s type must be str.""")
return [s[i:] + s[:i] for i in range(len(lowerCamelCase))]
def lowerCamelCase ( lowerCamelCase : str):
if not isinstance(lowerCamelCase , lowerCamelCase):
raise TypeError("""The parameter s type must be str.""")
if not s:
raise ValueError("""The parameter s must not be empty.""")
A_ : Any = all_rotations(lowerCamelCase)
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
A_ : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations]),
"idx_original_string": rotations.index(lowerCamelCase),
}
return response
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : int):
if not isinstance(lowerCamelCase , lowerCamelCase):
raise TypeError("""The parameter bwt_string type must be str.""")
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""")
try:
A_ : List[Any] = int(lowerCamelCase)
except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ convertible to int.""")
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
if idx_original_string >= len(lowerCamelCase):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""")
A_ : Any = [""""""] * len(lowerCamelCase)
for _ in range(len(lowerCamelCase)):
for i in range(len(lowerCamelCase)):
A_ : int = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__magic_name__ = 'Provide a string that I will generate its BWT transform: '
__magic_name__ = input(entry_msg).strip()
__magic_name__ = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result['bwt_string']}'"""
)
__magic_name__ = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
f"""we get original string '{original_string}'"""
)
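
A worked round trip for the transforms above (all_rotations, bwt_transform and reverse_bwt are the names the call sites in the listing still use, though the def statements themselves are mangled):

# result = bwt_transform("banana")
# result == {"bwt_string": "nnbaaa", "idx_original_string": 3}
# reverse_bwt("nnbaaa", 3) == "banana"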
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCamelCase ( lowerCamelCase : List[str]):
if "model" in orig_key:
A_ : List[str] = orig_key.replace("""model.""" , """""")
if "norm1" in orig_key:
A_ : List[str] = orig_key.replace("""norm1""" , """attention.output.LayerNorm""")
if "norm2" in orig_key:
A_ : str = orig_key.replace("""norm2""" , """output.LayerNorm""")
if "norm" in orig_key:
A_ : List[Any] = orig_key.replace("""norm""" , """LayerNorm""")
if "transformer" in orig_key:
A_ : Optional[int] = orig_key.split(""".""")[0].split("""_""")[-1]
A_ : Optional[int] = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}')
if "mha.attn" in orig_key:
A_ : int = orig_key.replace("""mha.attn""" , """attention.self""")
if "mha" in orig_key:
A_ : int = orig_key.replace("""mha""" , """attention""")
if "W_q" in orig_key:
A_ : List[str] = orig_key.replace("""W_q""" , """self.query""")
if "W_k" in orig_key:
A_ : Optional[Any] = orig_key.replace("""W_k""" , """self.key""")
if "W_v" in orig_key:
A_ : List[Any] = orig_key.replace("""W_v""" , """self.value""")
if "ff1" in orig_key:
A_ : Tuple = orig_key.replace("""ff1""" , """intermediate.dense""")
if "ff2" in orig_key:
A_ : List[Any] = orig_key.replace("""ff2""" , """output.dense""")
if "ff" in orig_key:
A_ : Dict = orig_key.replace("""ff""" , """output.dense""")
if "mlm_class" in orig_key:
A_ : Tuple = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""")
if "mlm" in orig_key:
A_ : Dict = orig_key.replace("""mlm""" , """cls.predictions.transform""")
if "cls" not in orig_key:
A_ : List[Any] = """yoso.""" + orig_key
return orig_key
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : str):
for key in orig_state_dict.copy().keys():
A_ : Optional[int] = orig_state_dict.pop(lowerCamelCase)
if ("pooler" in key) or ("sen_class" in key):
continue
else:
A_ : Union[str, Any] = val
A_ : Any = orig_state_dict["""cls.predictions.decoder.bias"""]
A_ : List[Any] = torch.arange(lowerCamelCase).expand((1, -1)) + 2
return orig_state_dict
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple):
A_ : Optional[Any] = torch.load(lowerCamelCase , map_location="""cpu""")["""model_state_dict"""]
A_ : List[str] = YosoConfig.from_json_file(lowerCamelCase)
A_ : Tuple = YosoForMaskedLM(lowerCamelCase)
A_ : List[str] = convert_checkpoint_helper(config.max_position_embeddings , lowerCamelCase)
print(model.load_state_dict(lowerCamelCase))
model.eval()
model.save_pretrained(lowerCamelCase)
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
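
A hypothetical command line matching the argparse flags defined above:

# python convert_yoso_checkpoint.py \
#     --pytorch_model_path ./yoso_checkpoint.bin \
#     --config_file ./yoso_config.json \
#     --pytorch_dump_path ./yoso-converted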
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
if __name__ == "__main__":
main()
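
The parser above backs the `accelerate` console entry point: each registered sub-parser sets a `func` default that `args.func(args)` dispatches to. Typical invocations, for illustration:

# accelerate config            # interactive configuration wizard
# accelerate env               # print environment information
# accelerate test              # sanity-check the saved configuration
# accelerate launch train.py   # run a script under the configured setup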
| 665 | 1 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] ,_a : int ,_a : List[Any] ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a )
def __call__( self : Any ):
'''simple docstring'''
A_ : List[str] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,)
A_ : Tuple = 1
A_ : List[str] = self.unet(_a ,_a ).sample
A_ : int = self.scheduler.step(_a ,_a ,_a ).prev_sample
A_ : Tuple = scheduler_output - scheduler_output + torch.ones_like(_a )
return result
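
A usage sketch for the toy pipeline above. Its __call__ returns torch.ones regardless of the model output (scheduler_output - scheduler_output cancels), so it exists purely to exercise the pipeline plumbing; the class name and model arguments below are assumptions:

# from diffusers import DDPMScheduler, UNet2DModel
# unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
# pipe = CustomLocalPipeline(unet, DDPMScheduler())
# pipe()  # -> tensor of ones with shape (1, 3, 8, 8)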
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
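
The import-structure blocks in this dump all feed transformers' _LazyModule, which defers importing heavy submodules until an attribute is first touched. A simplified, self-contained sketch of the idea (an illustration, not the actual transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes by importing the owning module on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # {module name: [exported names]}

    def __getattr__(self, attr):
        for module_name, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"a": 1}))  # the json module is imported only at this point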
| 665 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__magic_name__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__magic_name__ = 128_022
__magic_name__ = 128_028
@require_sentencepiece
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = MaMaaaTokenizer
a_ = False
a_ = False
a_ = True
def _a ( self : Tuple ):
'''simple docstring'''
super().setUp()
A_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
A_ : Dict = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = Path(self.tmpdirname )
save_json(_a ,save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_a ,save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
A_ : Any = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : str ,**_a : Dict ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname ,**_a )
def _a ( self : Any ,_a : Union[str, Any] ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : List[str] = """</s>"""
A_ : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) ,_a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) ,_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[Any] = self.get_tokenizer()
A_ : str = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""</s>""" )
self.assertEqual(vocab_keys[1] ,"""<unk>""" )
self.assertEqual(vocab_keys[-1] ,"""<s>""" )
self.assertEqual(len(_a ) ,tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def _a ( self : Any ):
'''simple docstring'''
pass
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Any = self.get_tokenizer()
A_ : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_a ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) ,[2, 3, 4, 5, 6] ,)
A_ : Dict = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_a ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
A_ : Union[str, Any] = tokenizer.convert_tokens_to_string(_a )
self.assertEqual(_a ,"""This is a test""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = {"""input_ids""": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a ,model_name="""facebook/m2m100_418M""" ,revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = """facebook/m2m100_418M"""
a_ = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
a_ = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
a_ = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def _a ( cls : Optional[Any] ):
'''simple docstring'''
A_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang="""en""" ,tgt_lang="""fr""" )
A_ : Optional[Any] = 1
return cls
def _a ( self : List[str] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) ,128006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) ,128022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) ,128076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) ,128063 )
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = self.tokenizer.get_vocab()
self.assertEqual(len(_a ) ,self.tokenizer.vocab_size )
self.assertEqual(vocab["""<unk>"""] ,3 )
self.assertIn(self.tokenizer.get_lang_token("""en""" ) ,_a )
def _a ( self : int ):
'''simple docstring'''
A_ : str = """en"""
A_ : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
self.assertIn(_a ,self.tokenizer.all_special_ids )
# fmt: off
A_ : Tuple = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
A_ : Any = self.tokenizer.decode(_a ,skip_special_tokens=_a )
A_ : str = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
self.assertNotIn(self.tokenizer.eos_token ,_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_a )
A_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(_a )
self.assertDictEqual(new_tok.lang_token_to_id ,_a )
@require_torch
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[Any] = """en"""
A_ : Union[str, Any] = """fr"""
A_ : int = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=_a ,return_tensors="""pt""" )
A_ : List[str] = shift_tokens_right(
batch["""labels"""] ,self.tokenizer.pad_token_id ,self.tokenizer.eos_token_id )
for k in batch:
A_ : Dict = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
A_ : str = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
@require_torch
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
A_ : Optional[int] = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = self.tokenizer._build_translation_inputs("""A test""" ,return_tensors="""pt""" ,src_lang="""en""" ,tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(_a ) ,{
# en_XX, A, test, EOS
"""input_ids""": [[128022, 58, 4183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 128006,
} ,)
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
__magic_name__ = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
A_ : Dict = 0
A_ : Union[str, Any] = 0
while place < len(lowerCamelCase):
if (place + 1 < len(lowerCamelCase)) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowerCamelCase ( lowerCamelCase : int):
A_ : Optional[int] = []
for arabic, roman in ROMAN:
((A_) , (A_)) : Optional[Any] = divmod(lowerCamelCase , lowerCamelCase)
result.append(roman * factor)
if number == 0:
break
return "".join(lowerCamelCase)
if __name__ == "__main__":
import doctest
doctest.testmod()
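
A worked round trip for the two converters above (both defs are mangled to the same name, so the descriptive names here are assumptions; note the second definition is the one that survives at module level):

# roman_to_int("MCMXCIV") -> 1994   (M=1000, CM=900, XC=90, IV=4)
# int_to_roman(1994)      -> "MCMXCIV"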
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__magic_name__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : str = [0] * len(lowerCamelCase)
A_ : Union[str, Any] = []
A_ : Union[str, Any] = []
A_ : Tuple = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCamelCase)):
if indegree[i] == 0:
queue.append(lowerCamelCase)
while queue:
A_ : Any = queue.pop(0)
cnt += 1
topo.append(lowerCamelCase)
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCamelCase)
if cnt != len(lowerCamelCase):
print("""Cycle exists""")
else:
print(lowerCamelCase)
# Adjacency List of Graph
__magic_name__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 665 | 1 |
'''simple docstring'''
__magic_name__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str , lowerCamelCase : Optional[Any]):
A_ : List[Any] = set()
# keep track of all the paths to be checked
A_ : List[str] = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
A_ : Union[str, Any] = queue.pop(0)
# get the last node from the path
A_ : Any = path[-1]
if node not in explored:
A_ : Optional[int] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
A_ : Tuple = list(lowerCamelCase)
new_path.append(lowerCamelCase)
queue.append(lowerCamelCase)
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowerCamelCase)
# in case there's no path between the 2 nodes
return []
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int]):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
A_ : Optional[int] = [start]
A_ : List[str] = set(lowerCamelCase)
# Keep tab on distances from `start` node.
A_ : Optional[int] = {start: 0, target: -1}
while queue:
A_ : List[str] = queue.pop(0)
if node == target:
A_ : List[Any] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node])
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowerCamelCase)
queue.append(lowerCamelCase)
A_ : Union[str, Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 665 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ : Any = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Any=False):
A_ : List[str] = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight'))
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias'))
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight'))
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias'))
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight'))
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias'))
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight'))
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias'))
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight'))
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias'))
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
])
return rename_keys
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any]=False):
for i in range(config.num_hidden_layers):
if base_model:
A_ : Any = """"""
else:
A_ : Tuple = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.weight')
A_ : Union[str, Any] = state_dict.pop(F'blocks.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
A_ : Dict = in_proj_bias[: config.hidden_size]
A_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
A_ : str = in_proj_bias[-config.hidden_size :]
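    # Illustrative shape check (added sketch, not part of the original script): for a
    # hypothetical hidden_size of 768, timm stores the fused qkv weight as a
    # (2304, 768) matrix, and the three slices above recover the (768, 768) query,
    # key and value projection matrices, in that order.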
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int]):
A_ : Union[str, Any] = dct.pop(lowerCamelCase)
A_ : List[str] = val
def lowerCamelCase ( ):
A_ : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : int = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Dict):
A_ : Dict = ViTConfig()
A_ : Tuple = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ : str = True
A_ : Dict = int(vit_name[-12:-10])
A_ : List[Any] = int(vit_name[-9:-6])
else:
A_ : List[str] = 1000
A_ : str = """huggingface/label-files"""
A_ : int = """imagenet-1k-id2label.json"""
A_ : Dict = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : List[Any] = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : str = idalabel
A_ : str = {v: k for k, v in idalabel.items()}
A_ : Tuple = int(vit_name[-6:-4])
A_ : Optional[Any] = int(vit_name[-3:])
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny"""):
A_ : str = 192
A_ : List[Any] = 768
A_ : str = 12
A_ : List[Any] = 3
elif vit_name[9:].startswith("""small"""):
A_ : Optional[int] = 384
A_ : Optional[Any] = 1536
A_ : Any = 12
A_ : List[Any] = 6
else:
pass
else:
if vit_name[4:].startswith("""small"""):
A_ : Optional[Any] = 768
A_ : Any = 2304
A_ : Union[str, Any] = 8
A_ : Any = 8
elif vit_name[4:].startswith("""base"""):
pass
elif vit_name[4:].startswith("""large"""):
A_ : List[str] = 1024
A_ : Union[str, Any] = 4096
A_ : Optional[Any] = 24
A_ : List[Any] = 16
elif vit_name[4:].startswith("""huge"""):
A_ : List[str] = 1280
A_ : str = 5120
A_ : Optional[int] = 32
A_ : List[str] = 16
# load original model from timm
A_ : int = timm.create_model(lowerCamelCase , pretrained=lowerCamelCase)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : int = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCamelCase)
A_ : int = create_rename_keys(lowerCamelCase , lowerCamelCase)
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase)
read_in_q_k_v(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ : str = ViTModel(lowerCamelCase).eval()
else:
A_ : Optional[Any] = ViTForImageClassification(lowerCamelCase).eval()
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ : Optional[int] = DeiTImageProcessor(size=config.image_size)
else:
A_ : str = ViTImageProcessor(size=config.image_size)
A_ : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : Optional[int] = encoding["""pixel_values"""]
A_ : List[Any] = model(lowerCamelCase)
if base_model:
A_ : int = timm_model.forward_features(lowerCamelCase)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCamelCase , outputs.pooler_output , atol=1E-3)
else:
A_ : Tuple = timm_model(lowerCamelCase)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase , outputs.logits , atol=1E-3)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__magic_name__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = 0.0
for i, j in zip(_a ,_a ):
n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0
A_ : List[str] = n_correct / len(_a )
return {
"accuracy": accuracy,
}
| 665 | 1 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Tuple = inspect.getfile(accelerate.test_utils )
A_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
A_ : Any = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _a ( self : str ):
'''simple docstring'''
A_ : int = f'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
A_ : Optional[Any] = [sys.executable] + distributed_args
execute_subprocess_async(_a ,env=os.environ.copy() )
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
| 665 | 1 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__magic_name__ = 299_792_458
# Symbols
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = symbols('ct x y z')
def lowerCamelCase ( lowerCamelCase : float):
if velocity > c:
raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""")
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("""Speed must be greater than or equal to 1!""")
return velocity / c
def lowerCamelCase ( lowerCamelCase : float):
return 1 / sqrt(1 - beta(lowerCamelCase) ** 2)
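# Quick numeric sanity check (hedged sketch, not in the original file): for a speed
# of 0.5 * c, beta(0.5 * c) returns 0.5 and gamma(0.5 * c) returns
# 1 / sqrt(1 - 0.25) ~= 1.1547, the familiar Lorentz factor at half light speed.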
def lowerCamelCase ( lowerCamelCase : float):
return np.array(
[
[gamma(lowerCamelCase), -gamma(lowerCamelCase) * beta(lowerCamelCase), 0, 0],
[-gamma(lowerCamelCase) * beta(lowerCamelCase), gamma(lowerCamelCase), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
])
def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : np.ndarray | None = None):
# Ensure event is not empty
if event is None:
A_ : Tuple = np.array([ct, x, y, z]) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(lowerCamelCase) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__magic_name__ = transform(29_979_245)
print('Example of four vector: ')
print(f"""ct' = {four_vector[0]}""")
print(f"""x' = {four_vector[1]}""")
print(f"""y' = {four_vector[2]}""")
print(f"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__magic_name__ = {ct: c, x: 1, y: 1, z: 1}
__magic_name__ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f"""\n{numerical_vector}""")
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
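        # Hedged illustration (not in the original file): a single sequence is laid out
        # as `[CLS] A [SEP]`, and a pair as `[CLS] A [SEP] B [SEP]`.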
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
while b:
A_ , A_ : int = b, a % b
return a
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase , a % b)
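# Illustrative trace (hedged sketch, not in the original file): euclidean_gcd(48, 18)
# iterates (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6; the recursive
# variant reaches the same answer through the same chain of remainders.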
def lowerCamelCase ( ):
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5)}')
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3)}')
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3)}')
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6)}')
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3)}')
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5)}')
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3)}')
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3)}')
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6)}')
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3)}')
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
| 665 | 1 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import requests
def lowerCamelCase ( lowerCamelCase : str):
A_ : Dict = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(lowerCamelCase).json()
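# Hedged note (not in the original file): for a valid story id the Hacker News API
# returns a JSON object whose fields typically include "title", "url", "score" and
# "by"; the markdown formatter below relies on the "title" and "url" keys.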
def lowerCamelCase ( lowerCamelCase : int = 10):
A_ : Tuple = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
A_ : Optional[int] = requests.get(lowerCamelCase).json()[:max_stories]
return [get_hackernews_story(lowerCamelCase) for story_id in story_ids]
def lowerCamelCase ( lowerCamelCase : int = 10):
A_ : Optional[Any] = hackernews_top_stories(lowerCamelCase)
return "\n".join("""* [{title}]({url})""".format(**lowerCamelCase) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase ( ):
A_ : Union[str, Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
A_ : Optional[Any] = bs[:]
A_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
A_ : List[Any] = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase))
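# Concrete mapping example (hedged, not in the original file): printable bytes map to
# themselves (e.g. b"A" -> "A"), while the space byte 0x20 is shifted into the
# printable range as chr(256 + 32) == "Ġ", the familiar GPT-2/RoBERTa word-start marker.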
def lowerCamelCase ( lowerCamelCase : int):
A_ : int = set()
A_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : List[str] = char
return pairs
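# Illustrative call (hedged sketch, not in the original file):
# get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}, i.e. every adjacent
# symbol pair that the BPE merge loop below can rank and fuse.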
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
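        # Hedged walk-through (not in the original file): with hypothetical merge ranks
        # {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") first fuses "l" + "o" into "lo",
        # then "lo" + "w" into "low", and returns the space-joined result "low".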
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
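        # Hedged illustration (not in the original file): single sequences become
        # `<s> A </s>`, and pairs follow the RoBERTa layout `<s> A </s></s> B </s>`.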
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
| 665 | 1 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = (EulerDiscreteScheduler,)
a_ = 10
def _a ( self : Optional[int] ,**_a : List[str] ):
'''simple docstring'''
A_ : Dict = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_a )
return config
def _a ( self : int ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def _a ( self : List[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a ,beta_end=_a )
def _a ( self : str ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def _a ( self : Any ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : Optional[int] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
A_ : Any = torch.manual_seed(0 )
A_ : Any = self.dummy_model()
A_ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ : str = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
A_ : int = scheduler.scale_model_input(_a ,_a )
A_ : Union[str, Any] = model(_a ,_a )
A_ : str = scheduler.step(_a ,_a ,_a ,generator=_a )
A_ : str = output.prev_sample
A_ : Union[str, Any] = torch.sum(torch.abs(_a ) )
A_ : Optional[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[str] = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config(prediction_type="""v_prediction""" )
A_ : Union[str, Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[Any] = self.dummy_model()
A_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ : Any = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
A_ : Tuple = scheduler.scale_model_input(_a ,_a )
A_ : Tuple = model(_a ,_a )
A_ : Any = scheduler.step(_a ,_a ,_a ,generator=_a )
A_ : Tuple = output.prev_sample
A_ : Union[str, Any] = torch.sum(torch.abs(_a ) )
A_ : str = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : int = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps ,device=_a )
A_ : int = torch.manual_seed(0 )
A_ : Tuple = self.dummy_model()
A_ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ : str = sample.to(_a )
for t in scheduler.timesteps:
A_ : int = scheduler.scale_model_input(_a ,_a )
A_ : List[Any] = model(_a ,_a )
A_ : Optional[Any] = scheduler.step(_a ,_a ,_a ,generator=_a )
A_ : List[Any] = output.prev_sample
A_ : Union[str, Any] = torch.sum(torch.abs(_a ) )
A_ : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : str = self.get_scheduler_config()
A_ : int = scheduler_class(**_a ,use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps ,device=_a )
A_ : Optional[int] = torch.manual_seed(0 )
A_ : Tuple = self.dummy_model()
A_ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ : List[str] = sample.to(_a )
for t in scheduler.timesteps:
A_ : List[Any] = scheduler.scale_model_input(_a ,_a )
A_ : Any = model(_a ,_a )
A_ : Any = scheduler.step(_a ,_a ,_a ,generator=_a )
A_ : Any = output.prev_sample
A_ : Dict = torch.sum(torch.abs(_a ) )
A_ : Optional[int] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
| 665 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Tuple = tempfile.mkdtemp()
A_ : Any = 5
# Realm tok
A_ : Optional[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A_ : str = os.path.join(self.tmpdirname ,"""realm_tokenizer""" )
os.makedirs(_a ,exist_ok=_a )
A_ : int = os.path.join(_a ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
A_ : Optional[Any] = os.path.join(self.tmpdirname ,"""realm_block_records""" )
os.makedirs(_a ,exist_ok=_a )
def _a ( self : Any ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""realm_tokenizer""" ) )
def _a ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = RealmConfig(num_block_records=self.num_block_records )
return config
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Any = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Tuple = np.array(
[
B"""This is the first record""",
B"""This is the second record""",
B"""This is the third record""",
B"""This is the fourth record""",
B"""This is the fifth record""",
B"""This is a longer longer longer record""",
] ,dtype=_a ,)
return block_records
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() ,tokenizer=self.get_tokenizer() ,)
return retriever
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = self.get_config()
A_ : str = self.get_dummy_retriever()
A_ : str = retriever.tokenizer
A_ : str = np.array([0, 3] ,dtype="""long""" )
A_ : Optional[int] = tokenizer(["""Test question"""] ).input_ids
A_ : Optional[Any] = tokenizer(
["""the fourth"""] ,add_special_tokens=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,).input_ids
A_ : Dict = config.reader_seq_len
A_ , A_ , A_ , A_ : Optional[Any] = retriever(
_a ,_a ,answer_ids=_a ,max_length=_a ,return_tensors="""np""" )
self.assertEqual(len(_a ) ,2 )
self.assertEqual(len(_a ) ,2 )
self.assertEqual(len(_a ) ,2 )
self.assertEqual(concat_inputs.input_ids.shape ,(2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape ,(2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape ,(2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape ,(2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) ,["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] ,)
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) ,["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] ,)
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = self.get_config()
A_ : str = self.get_dummy_retriever()
A_ : Dict = retriever.tokenizer
A_ : str = np.array([0, 3, 5] ,dtype="""long""" )
A_ : Any = tokenizer(["""Test question"""] ).input_ids
A_ : Dict = tokenizer(
["""the fourth""", """longer longer"""] ,add_special_tokens=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,).input_ids
A_ : Union[str, Any] = config.reader_seq_len
A_ , A_ , A_ , A_ : int = retriever(
_a ,_a ,answer_ids=_a ,max_length=_a ,return_tensors="""np""" )
self.assertEqual([False, True, True] ,_a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] ,_a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] ,_a )
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[int] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname ,"""realm_block_records""" ) )
# Test local path
A_ : Dict = retriever.from_pretrained(os.path.join(self.tmpdirname ,"""realm_block_records""" ) )
self.assertEqual(retriever.block_records[0] ,B"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
A_ : int = os.path.join(
os.path.join(self.tmpdirname ,"""realm_block_records""" ) ,_REALM_BLOCK_RECORDS_FILENAME )
A_ : str = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0] ,B"""This is the first record""" )
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
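# Illustrative example (not part of the original file): BART inserts two separator
# tokens between a pair, so for sequences A and B the method above produces
# <s> A </s> </s> B </s>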
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
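# Editorial note: BART does not use token type ids, so the mask above is all zeros
# even when a second sequence is provided.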
| 665 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = GPTaTokenizer
a_ = GPTaTokenizerFast
a_ = True
a_ = {"""add_prefix_space""": True}
a_ = False
def _a ( self : List[str] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
A_ : Optional[int] = dict(zip(_a ,range(len(_a ) ) ) )
A_ : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A_ : Union[str, Any] = {"""unk_token""": """<unk>"""}
A_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def _a ( self : Dict ,**_a : Tuple ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname ,**_a )
def _a ( self : List[str] ,**_a : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**_a )
def _a ( self : List[Any] ,_a : str ):
'''simple docstring'''
A_ : Optional[int] = """lower newer"""
A_ : List[Any] = """lower newer"""
return input_text, output_text
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
A_ : str = """lower newer"""
A_ : Tuple = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
A_ : str = tokenizer.tokenize(_a ,add_prefix_space=_a )
self.assertListEqual(_a ,_a )
A_ : Optional[int] = tokens + [tokenizer.unk_token]
A_ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A_ : List[Any] = self.get_tokenizer()
A_ : int = self.get_rust_tokenizer(add_prefix_space=_a )
A_ : Tuple = """lower newer"""
# Testing tokenization
A_ : str = tokenizer.tokenize(_a ,add_prefix_space=_a )
A_ : Optional[int] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a ,_a )
# Testing conversion to ids without special tokens
A_ : Dict = tokenizer.encode(_a ,add_special_tokens=_a ,add_prefix_space=_a )
A_ : Optional[Any] = rust_tokenizer.encode(_a ,add_special_tokens=_a )
self.assertListEqual(_a ,_a )
# Testing conversion to ids with special tokens
A_ : Tuple = self.get_rust_tokenizer(add_prefix_space=_a )
A_ : Tuple = tokenizer.encode(_a ,add_prefix_space=_a )
A_ : Any = rust_tokenizer.encode(_a )
self.assertListEqual(_a ,_a )
# Testing the unknown token
A_ : List[str] = tokens + [rust_tokenizer.unk_token]
A_ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) ,_a )
def _a ( self : Optional[Any] ,*_a : List[str] ,**_a : Optional[Any] ):
'''simple docstring'''
pass
def _a ( self : Optional[Any] ,_a : Dict=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : Dict = self.rust_tokenizer_class.from_pretrained(_a ,**_a )
# Simple input
A_ : Tuple = """This is a simple input"""
A_ : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
A_ : List[str] = ("""This is a simple input""", """This is a pair""")
A_ : Any = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_a ,tokenizer_r.encode ,_a ,max_length=_a ,padding="""max_length""" )
# Simple input
self.assertRaises(_a ,tokenizer_r.encode_plus ,_a ,max_length=_a ,padding="""max_length""" )
# Simple input
self.assertRaises(
_a ,tokenizer_r.batch_encode_plus ,_a ,max_length=_a ,padding="""max_length""" ,)
# Pair input
self.assertRaises(_a ,tokenizer_r.encode ,_a ,max_length=_a ,padding="""max_length""" )
# Pair input
self.assertRaises(_a ,tokenizer_r.encode_plus ,_a ,max_length=_a ,padding="""max_length""" )
# Pair input
self.assertRaises(
_a ,tokenizer_r.batch_encode_plus ,_a ,max_length=_a ,padding="""max_length""" ,)
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token="""<pad>""" )
# Simple input
A_ : Dict = """This is a simple input"""
A_ : List[str] = ["""This is a simple input looooooooong""", """This is a simple input"""]
A_ : int = ("""This is a simple input""", """This is a pair""")
A_ : Tuple = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
A_ : Optional[int] = tokenizer.pad_token_id
A_ : Union[str, Any] = tokenizer(_a ,padding="""max_length""" ,max_length=30 ,return_tensors="""np""" )
A_ : Union[str, Any] = tokenizer(_a ,padding=_a ,truncation=_a ,return_tensors="""np""" )
A_ : List[Any] = tokenizer(*_a ,padding="""max_length""" ,max_length=60 ,return_tensors="""np""" )
A_ : str = tokenizer(_a ,padding=_a ,truncation=_a ,return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = """$$$"""
A_ : str = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=_a ,add_bos_token=_a )
A_ : Optional[Any] = """This is a simple input"""
A_ : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
A_ : Dict = tokenizer.bos_token_id
A_ : int = tokenizer(_a )
A_ : Union[str, Any] = tokenizer(_a )
self.assertEqual(out_s.input_ids[0] ,_a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A_ : Any = tokenizer.decode(out_s.input_ids )
A_ : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,_a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _a ( self : Optional[int] ):
'''simple docstring'''
pass
def _a ( self : Dict ):
'''simple docstring'''
A_ : Dict = [self.get_tokenizer(do_lower_case=_a ,add_bos_token=_a )]
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
A_ : Dict = """Encode this."""
A_ : Optional[int] = """This one too please."""
A_ : List[Any] = tokenizer.encode(_a ,add_special_tokens=_a )
encoded_sequence += tokenizer.encode(_a ,add_special_tokens=_a )
A_ : List[str] = tokenizer.encode_plus(
_a ,_a ,add_special_tokens=_a ,return_special_tokens_mask=_a ,)
A_ : str = encoded_sequence_dict["""input_ids"""]
A_ : List[Any] = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(_a ) ,len(_a ) )
A_ : List[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_a )
]
A_ : int = [x for x in filtered_sequence if x is not None]
self.assertEqual(_a ,_a )
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = AutoTokenizer.from_pretrained("""facebook/opt-350m""" ,from_slow=_a )
A_ : int = """A photo of a cat"""
A_ : Union[str, Any] = tokenizer.encode(
_a ,)
self.assertEqual(_a ,[2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""test_opt""" )
A_ : int = AutoTokenizer.from_pretrained("""./test_opt""" )
A_ : Optional[int] = tokenizer.encode(
_a ,)
self.assertEqual(_a ,[2, 250, 1345, 9, 10, 4758] )
def _a ( self : int ):
'''simple docstring'''
A_ : int = AutoTokenizer.from_pretrained("""facebook/opt-350m""" ,use_slow=_a )
A_ : List[str] = """A photo of a cat"""
A_ : Union[str, Any] = tokenizer.encode(
_a ,)
# Same as above
self.assertEqual(_a ,[2, 250, 1345, 9, 10, 4758] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def _a ( self : int ):
'''simple docstring'''
A_ : Dict = AutoTokenizer.from_pretrained("""facebook/opt-350m""" ,from_slow=_a )
A_ : Optional[int] = """bos"""
A_ : Optional[int] = tokenizer.get_vocab()["""bos"""]
A_ : Optional[Any] = """A photo of a cat"""
A_ : Tuple = tokenizer.encode(
_a ,)
# We changed the bos token
self.assertEqual(_a ,[31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""./tok""" )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
A_ : Optional[int] = tokenizer.encode(
_a ,)
self.assertEqual(_a ,[31957, 250, 1345, 9, 10, 4758] )
| 665 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialize the PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
# reset_position_index_per_cell of TapasConfig to False.
# Initialize the configuration from the json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 665 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any ,_a : Tuple ,_a : str=100 ,_a : Optional[Any]=13 ,_a : Optional[int]=30 ,_a : List[str]=2 ,_a : Any=3 ,_a : Optional[int]=True ,_a : str=True ,_a : Any=32 ,_a : int=4 ,_a : Optional[Any]=4 ,_a : Any=37 ,_a : str="gelu" ,_a : List[str]=0.1 ,_a : Optional[int]=0.1 ,_a : List[Any]=10 ,_a : Any=0.02 ,_a : Dict=3 ,_a : Optional[int]=None ,_a : int=[0, 1, 2, 3] ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Union[str, Any] = 100
A_ : Dict = batch_size
A_ : Optional[int] = image_size
A_ : Tuple = patch_size
A_ : Any = num_channels
A_ : Tuple = is_training
A_ : Any = use_labels
A_ : Tuple = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Any = type_sequence_label_size
A_ : Optional[Any] = initializer_range
A_ : Any = scope
A_ : Dict = out_indices
A_ : Optional[int] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : str = (image_size // patch_size) ** 2
A_ : Tuple = num_patches + 1
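# Worked example (illustrative): with the tester defaults image_size=30 and patch_size=2,
# this gives (30 // 2) ** 2 = 225 patches, so seq_length = 226 including the [CLS] token.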
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : str = None
A_ : Optional[int] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
A_ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def _a ( self : Tuple ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_a ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
def _a ( self : int ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[Any] ,_a : List[str] ):
'''simple docstring'''
A_ : List[Any] = BeitModel(config=_a )
model.to(_a )
model.eval()
A_ : Tuple = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] ,_a : Tuple ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Dict = BeitForMaskedImageModeling(config=_a )
model.to(_a )
model.eval()
A_ : Any = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def _a ( self : Optional[int] ,_a : Optional[int] ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : Tuple ):
'''simple docstring'''
A_ : int = self.type_sequence_label_size
A_ : str = BeitForImageClassification(_a )
model.to(_a )
model.eval()
A_ : List[Any] = model(_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Tuple = 1
A_ : int = BeitForImageClassification(_a )
model.to(_a )
model.eval()
A_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : str = model(_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _a ( self : Union[str, Any] ,_a : Dict ,_a : Any ,_a : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = self.num_labels
A_ : List[str] = BeitForSemanticSegmentation(_a )
model.to(_a )
model.eval()
A_ : int = model(_a )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
A_ : int = model(_a ,labels=_a )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _a ( self : Any ):
'''simple docstring'''
A_ : Any = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ : str = config_and_inputs
A_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
a_ = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = BeitModelTester(self )
A_ : List[Any] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def _a ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def _a ( self : str ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a ( self : Optional[int] ):
'''simple docstring'''
pass
def _a ( self : List[str] ):
'''simple docstring'''
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def _a ( self : List[str] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_a )
A_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : str = [*signature.parameters.keys()]
A_ : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
def _a ( self : int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_a ), BeitForMaskedImageModeling]:
continue
A_ : Tuple = model_class(_a )
model.to(_a )
model.train()
A_ : Any = self._prepare_for_class(_a ,_a ,return_labels=_a )
A_ : int = model(**_a ).loss
loss.backward()
def _a ( self : List[str] ):
'''simple docstring'''
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : List[str] = False
A_ : Dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_a ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
A_ : Optional[int] = model_class(_a )
model.gradient_checkpointing_enable()
model.to(_a )
model.train()
A_ : str = self._prepare_for_class(_a ,_a ,return_labels=_a )
A_ : Any = model(**_a ).loss
loss.backward()
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = _config_zero_init(_a )
for model_class in self.all_model_classes:
A_ : str = model_class(config=_a )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Any = BeitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase ( ):
A_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self : str ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(_a )
A_ : int = self.default_image_processor
A_ : int = prepare_img()
A_ : Any = image_processor(images=_a ,return_tensors="""pt""" ).pixel_values.to(_a )
# prepare bool_masked_pos
A_ : Optional[Any] = torch.ones((1, 196) ,dtype=torch.bool ).to(_a )
# forward pass
with torch.no_grad():
A_ : List[str] = model(pixel_values=_a ,bool_masked_pos=_a )
A_ : Dict = outputs.logits
# verify the logits
A_ : Dict = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,_a )
A_ : List[Any] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_a )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,_a ,atol=1e-2 ) )
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : List[Any] = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(_a )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Optional[int] = image_processor(images=_a ,return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**_a )
A_ : Tuple = outputs.logits
# verify the logits
A_ : Any = torch.Size((1, 1000) )
self.assertEqual(logits.shape ,_a )
A_ : Tuple = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_a )
self.assertTrue(torch.allclose(logits[0, :3] ,_a ,atol=1e-4 ) )
A_ : Optional[int] = 281
self.assertEqual(logits.argmax(-1 ).item() ,_a )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
_a )
A_ : str = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=_a ,return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
A_ : Dict = model(**_a )
A_ : List[str] = outputs.logits
# verify the logits
A_ : Union[str, Any] = torch.Size((1, 21841) )
self.assertEqual(logits.shape ,_a )
A_ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_a )
self.assertTrue(torch.allclose(logits[0, :3] ,_a ,atol=1e-4 ) )
A_ : int = 2396
self.assertEqual(logits.argmax(-1 ).item() ,_a )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
A_ : str = model.to(_a )
A_ : List[Any] = BeitImageProcessor(do_resize=_a ,size=640 ,do_center_crop=_a )
A_ : List[str] = load_dataset("""hf-internal-testing/fixtures_ade20k""" ,split="""test""" )
A_ : str = Image.open(ds[0]["""file"""] )
A_ : str = image_processor(images=_a ,return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
A_ : str = model(**_a )
A_ : List[str] = outputs.logits
# verify the logits
A_ : int = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,_a )
A_ : Tuple = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
A_ : Optional[int] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] ,device=_a ,)
else:
A_ : Optional[Any] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] ,device=_a ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
A_ : List[str] = model.to(_a )
A_ : Union[str, Any] = BeitImageProcessor(do_resize=_a ,size=640 ,do_center_crop=_a )
A_ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" ,split="""test""" )
A_ : Union[str, Any] = Image.open(ds[0]["""file"""] )
A_ : Dict = image_processor(images=_a ,return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
A_ : int = model(**_a )
A_ : Optional[int] = outputs.logits.detach().cpu()
A_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=_a ,target_sizes=[(500, 300)] )
A_ : Tuple = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,_a )
A_ : Dict = image_processor.post_process_semantic_segmentation(outputs=_a )
A_ : Tuple = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,_a )
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
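# Editorial note: the original check distinguishes DDIM from DDPM; the deterministic
# DDIM sampler produces good results in ~50 steps, while DDPM uses its full
# 1000-step schedule.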
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
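# The next three lines invert the deterministic DDIM update: subtract the
# predicted-noise direction, rescale by the previous alpha_prod, then re-apply
# the predicted noise at the current timestep.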
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ):
'''simple docstring'''
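# Spherical linear interpolation (slerp) between two flattened tensors: theta is
# the angle between them, and interpolating along the arc preserves the statistics
# of Gaussian noise better than a straight linear mix.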
A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
| 665 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Dict = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A_ : str = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
A_ : Optional[int] = 4
A_ : Optional[Any] = 48
A_ : Optional[int] = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A_ : Tuple = [6, 6, 6, 6]
A_ : Any = 60
A_ : Optional[Any] = [6, 6, 6, 6]
A_ : Dict = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A_ : Optional[Any] = 4
A_ : int = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
A_ : Tuple = 1
A_ : Optional[int] = 1
A_ : List[str] = 126
A_ : int = 7
A_ : Optional[Any] = 255.0
A_ : Optional[int] = """"""
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Dict):
if "patch_embed.proj" in name and "layers" not in name:
A_ : List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""")
if "patch_embed.norm" in name:
A_ : Union[str, Any] = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""")
if "layers" in name:
A_ : Union[str, Any] = name.replace("""layers""" , """encoder.stages""")
if "residual_group.blocks" in name:
A_ : Any = name.replace("""residual_group.blocks""" , """layers""")
if "attn.proj" in name:
A_ : int = name.replace("""attn.proj""" , """attention.output.dense""")
if "attn" in name:
A_ : List[str] = name.replace("""attn""" , """attention.self""")
if "norm1" in name:
A_ : str = name.replace("""norm1""" , """layernorm_before""")
if "norm2" in name:
A_ : Any = name.replace("""norm2""" , """layernorm_after""")
if "mlp.fc1" in name:
A_ : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""")
if "mlp.fc2" in name:
A_ : Optional[Any] = name.replace("""mlp.fc2""" , """output.dense""")
if "q_bias" in name:
A_ : Union[str, Any] = name.replace("""q_bias""" , """query.bias""")
if "k_bias" in name:
A_ : Dict = name.replace("""k_bias""" , """key.bias""")
if "v_bias" in name:
A_ : Optional[Any] = name.replace("""v_bias""" , """value.bias""")
if "cpb_mlp" in name:
A_ : List[str] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""")
if "patch_embed.proj" in name:
A_ : Optional[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""")
if name == "norm.weight":
A_ : Any = """layernorm.weight"""
if name == "norm.bias":
A_ : Union[str, Any] = """layernorm.bias"""
if "conv_first" in name:
A_ : List[str] = name.replace("""conv_first""" , """first_convolution""")
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
A_ : Dict = name.replace("""conv_last""" , """final_convolution""")
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
A_ : Tuple = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""")
if "upsample.0" in name:
A_ : Union[str, Any] = name.replace("""upsample.0""" , """upsample.convolution_0""")
if "upsample.2" in name:
A_ : List[str] = name.replace("""upsample.2""" , """upsample.convolution_1""")
A_ : Optional[int] = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
A_ : List[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""")
A_ : Tuple = name.replace("""upsample.0.bias""" , """upsample.conv.bias""")
else:
pass
else:
A_ : Any = """swin2sr.""" + name
return name
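# Illustrative example (not part of the original file) of one full rename:
# "layers.0.residual_group.blocks.1.attn.proj.weight"
# -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"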
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : List[Any]):
for key in orig_state_dict.copy().keys():
A_ : Optional[Any] = orig_state_dict.pop(lowerCamelCase)
if "qkv" in key:
A_ : Optional[int] = key.split(""".""")
A_ : Any = int(key_split[1])
A_ : List[Any] = int(key_split[4])
A_ : str = config.embed_dim
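# The checkpoint stores a fused qkv matrix of shape (3 * dim, dim); the slices
# below split it into the separate query, key and value projections.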
if "weight" in key:
A_ : Union[str, Any] = val[:dim, :]
A_ : List[Any] = val[dim : dim * 2, :]
A_ : Tuple = val[-dim:, :]
else:
A_ : Optional[int] = val[:dim]
A_ : Union[str, Any] = val[dim : dim * 2]
A_ : str = val[-dim:]
else:
A_ : Any = val
return orig_state_dict
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Tuple):
A_ : int = get_config(lowerCamelCase)
A_ : str = SwinaSRForImageSuperResolution(lowerCamelCase)
model.eval()
A_ : Union[str, Any] = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="""cpu""")
A_ : List[str] = convert_state_dict(lowerCamelCase , lowerCamelCase)
A_ , A_ : Any = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase)
if len(lowerCamelCase) > 0:
raise ValueError("""Missing keys when converting: {}""".format(lowerCamelCase))
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict')
# verify values
A_ : int = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
A_ : Tuple = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw).convert("""RGB""")
A_ : Any = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
A_ : Optional[int] = 126 if """Jpeg""" in checkpoint_url else 256
A_ : Tuple = Compose(
[
Resize((image_size, image_size)),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
])
A_ : Union[str, Any] = transforms(lowerCamelCase).unsqueeze(0)
if config.num_channels == 1:
A_ : Tuple = pixel_values[:, 0, :, :].unsqueeze(1)
A_ : Optional[int] = model(lowerCamelCase)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
A_ : Optional[Any] = torch.Size([1, 3, 512, 512])
A_ : List[Any] = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A_ : str = torch.Size([1, 3, 1024, 1024])
A_ : Tuple = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
A_ : Dict = torch.Size([1, 3, 1024, 1024])
A_ : str = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A_ : List[str] = torch.Size([1, 3, 512, 512])
A_ : List[str] = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A_ : Optional[int] = torch.Size([1, 3, 1024, 1024])
A_ : Optional[int] = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCamelCase , atol=1E-3)
print("""Looks ok!""")
A_ : Optional[int] = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
A_ : Any = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
processor.save_pretrained(lowerCamelCase)
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}')
processor.push_to_hub(F'caidas/{model_name}')
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
__magic_name__ = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 665 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
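# Minimal, self-contained sketch (hypothetical lengths, no real tokenizer) of
# the `pad_to_multiple_of` logic used by `collate_fn` above: rounding the
# longest sequence up to the next multiple of 8/16 keeps tensor shapes
# friendly to fp16/bf16 tensor cores.
def _pad_to_multiple_sketch(lengths, multiple):
    longest = max(lengths)
    if multiple is not None:
        # Round up to the next multiple (ceiling division, then scale back).
        longest = ((longest + multiple - 1) // multiple) * multiple
    return longest

assert _pad_to_multiple_sketch([5, 11, 7], 8) == 16
assert _pad_to_multiple_sketch([5, 11, 7], None) == 11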
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
    # If the batch size is too big, we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation; otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
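# Standalone sanity check of the gradient-accumulation arithmetic used above
# (hypothetical numbers): an effective batch of 64 on hardware that only fits
# 16 examples per step needs 4 micro-steps, and dividing each micro-batch
# loss by 4 makes the accumulated gradients match one large-batch step.
_effective_batch, _max_per_step = 64, 16
_accum_steps = _effective_batch // _max_per_step
assert _accum_steps == 4
_micro_losses = (1.0, 2.0, 3.0, 4.0)
assert sum(l / _accum_steps for l in _micro_losses) == sum(_micro_losses) / _accum_steps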
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 1 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : int , ):
A_ : List[Any] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
A_ , A_ : str = input_paths_and_base_extractors[compression_format]
if input_path is None:
A_ : str = F'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase)
assert base_extractor.is_extractable(lowerCamelCase)
A_ : Optional[Any] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(lowerCamelCase , lowerCamelCase)
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
A_ : Any = file_path.read_text(encoding="""utf-8""")
else:
A_ : Tuple = output_path.read_text(encoding="""utf-8""")
A_ : Dict = text_file.read_text(encoding="""utf-8""")
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Dict , lowerCamelCase : Dict , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Any , ):
A_ : Any = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
A_ : Any = input_paths[compression_format]
if input_path is None:
A_ : Optional[int] = F'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase)
A_ : Union[str, Any] = Extractor.infer_extractor_format(lowerCamelCase)
assert extractor_format is not None
A_ : Dict = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(lowerCamelCase , lowerCamelCase , lowerCamelCase)
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
A_ : List[str] = file_path.read_text(encoding="""utf-8""")
else:
A_ : Union[str, Any] = output_path.read_text(encoding="""utf-8""")
A_ : int = text_file.read_text(encoding="""utf-8""")
assert extracted_file_content == expected_file_content
@pytest.fixture
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[Any]):
import tarfile
A_ : Tuple = tmp_path / """data_dot_dot"""
directory.mkdir()
A_ : List[Any] = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(lowerCamelCase , """w""") as f:
f.add(lowerCamelCase , arcname=os.path.join("""..""" , text_file.name))
return path
@pytest.fixture
def lowerCamelCase ( lowerCamelCase : List[Any]):
import tarfile
A_ : str = tmp_path / """data_sym_link"""
directory.mkdir()
A_ : str = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=lowerCamelCase)
with tarfile.TarFile(lowerCamelCase , """w""") as f:
f.add(str(directory / """subdir""") , arcname="""subdir""") # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple):
A_ : List[str] = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
A_ : Union[str, Any] = insecure_tar_files[insecure_tar_file]
A_ : Optional[int] = tmp_path / """extracted"""
TarExtractor.extract(lowerCamelCase , lowerCamelCase)
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def lowerCamelCase ( lowerCamelCase : str):
    # We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
A_ : str = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
A_ : int = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""") as f:
f.write(lowerCamelCase)
assert zipfile.is_zipfile(str(lowerCamelCase)) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCamelCase) # but we're right
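# Self-contained sketch of the magic-number idea the test above relies on (an
# assumption about the approach, not necessarily the library's exact code):
# look only at the leading bytes, unlike zipfile.is_zipfile, which accepts a
# file as soon as it finds an end-of-central-directory record anywhere in it.
def _starts_with_zip_magic(path):
    with open(path, "rb") as fh:
        # Local file header, empty archive, and spanned archive signatures.
        return fh.read(4) in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")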
| 665 |
'''simple docstring'''
import functools
def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]):
# Validation
if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days):
raise ValueError("""The parameter days should be a list of integers""")
if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs):
raise ValueError("""The parameter costs should be a list of three integers""")
if len(lowerCamelCase) == 0:
return 0
if min(lowerCamelCase) <= 0:
raise ValueError("""All days elements should be greater than 0""")
if max(lowerCamelCase) >= 366:
raise ValueError("""All days elements should be less than 366""")
A_ : Tuple = set(lowerCamelCase)
@functools.cache
def dynamic_programming(lowerCamelCase : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
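# Worked example (classic inputs for this problem): for days
# [1, 4, 6, 7, 8, 20] with costs [2, 7, 15], the minimum cost is 11 --
# a 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day
# pass on day 20.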
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
__magic_name__ = 'src/transformers'
# Matches is_xxx_available()
__magic_name__ = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__magic_name__ = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__magic_name__ = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__magic_name__ = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__magic_name__ = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__magic_name__ = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__magic_name__ = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__magic_name__ = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__magic_name__ = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__magic_name__ = re.compile(r'^\s*try:')
# Catches a line with else:
__magic_name__ = re.compile(r'^\s*else:')
def lowerCamelCase ( lowerCamelCase : List[Any]):
if _re_test_backend.search(lowerCamelCase) is None:
return None
A_ : str = [b[0] for b in _re_backend.findall(lowerCamelCase)]
backends.sort()
return "_and_".join(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
A_ : List[str] = f.readlines()
A_ : int = 0
while line_index < len(lowerCamelCase) and not lines[line_index].startswith("""_import_structure = {"""):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCamelCase):
return None
# First grab the objects without a specific backend in _import_structure
A_ : Optional[int] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""") and find_backend(lines[line_index]) is None:
A_ : Any = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCamelCase):
A_ : Dict = _re_one_line_import_struct.search(lowerCamelCase).groups()[0]
            A_ : Optional[int] = re.findall(r"""\[([^\]]+)\]""" , lowerCamelCase)
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """)])
line_index += 1
continue
A_ : str = _re_import_struct_key_value.search(lowerCamelCase)
if single_line_import_search is not None:
A_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """) if len(lowerCamelCase) > 0]
objects.extend(lowerCamelCase)
elif line.startswith(""" """ * 8 + """\""""):
objects.append(line[9:-3])
line_index += 1
A_ : str = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING"""):
        # If the line is an if not is_backend_available, we grab all associated objects.
A_ : Optional[int] = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
A_ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
A_ : int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 4):
A_ : Union[str, Any] = lines[line_index]
if _re_import_struct_add_one.search(lowerCamelCase) is not None:
objects.append(_re_import_struct_add_one.search(lowerCamelCase).groups()[0])
elif _re_import_struct_add_many.search(lowerCamelCase) is not None:
A_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase).groups()[0].split(""", """)
A_ : Dict = [obj[1:-1] for obj in imports if len(lowerCamelCase) > 0]
objects.extend(lowerCamelCase)
elif _re_between_brackets.search(lowerCamelCase) is not None:
A_ : int = _re_between_brackets.search(lowerCamelCase).groups()[0].split(""", """)
A_ : Tuple = [obj[1:-1] for obj in imports if len(lowerCamelCase) > 0]
objects.extend(lowerCamelCase)
elif _re_quote_object.search(lowerCamelCase) is not None:
objects.append(_re_quote_object.search(lowerCamelCase).groups()[0])
elif line.startswith(""" """ * 8 + """\""""):
objects.append(line[9:-3])
elif line.startswith(""" """ * 12 + """\""""):
objects.append(line[13:-3])
line_index += 1
A_ : str = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A_ : Union[str, Any] = []
while (
line_index < len(lowerCamelCase)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith("""else""")
):
A_ : Optional[int] = lines[line_index]
A_ : str = _re_import.search(lowerCamelCase)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 8):
objects.append(line[8:-2])
line_index += 1
A_ : List[str] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCamelCase):
        # If the line is an if is_backend_available, we grab all associated objects.
A_ : List[str] = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
A_ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
A_ : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 8):
A_ : Any = lines[line_index]
A_ : Dict = _re_import.search(lowerCamelCase)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 12):
objects.append(line[12:-2])
line_index += 1
A_ : Union[str, Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Any):
def find_duplicates(lowerCamelCase : Any):
return [k for k, v in collections.Counter(lowerCamelCase).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
A_ : Optional[int] = []
for key in import_dict_objects.keys():
A_ : str = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}')
A_ : Tuple = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
A_ : Optional[int] = """base imports""" if key == """none""" else F'{key} backend'
errors.append(F'Differences for {name}:')
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.')
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.')
return errors
def lowerCamelCase ( ):
A_ : Optional[Any] = []
for root, _, files in os.walk(lowerCamelCase):
if "__init__.py" in files:
A_ : Optional[int] = os.path.join(lowerCamelCase , """__init__.py""")
A_ : Tuple = parse_init(lowerCamelCase)
if objects is not None:
A_ : List[str] = analyze_results(*lowerCamelCase)
if len(lowerCamelCase) > 0:
A_ : List[str] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("""\n""".join(lowerCamelCase))
if len(lowerCamelCase) > 0:
raise ValueError("""\n\n""".join(lowerCamelCase))
def lowerCamelCase ( ):
A_ : List[str] = []
for path, directories, files in os.walk(lowerCamelCase):
for folder in directories:
# Ignore private modules
if folder.startswith("""_"""):
directories.remove(lowerCamelCase)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCamelCase) / folder).glob("""*.py"""))) == 0:
continue
A_ : Union[str, Any] = str((Path(lowerCamelCase) / folder).relative_to(lowerCamelCase))
A_ : int = short_path.replace(os.path.sep , """.""")
submodules.append(lowerCamelCase)
for fname in files:
if fname == "__init__.py":
continue
A_ : Union[str, Any] = str((Path(lowerCamelCase) / fname).relative_to(lowerCamelCase))
A_ : Tuple = short_path.replace(""".py""" , """""").replace(os.path.sep , """.""")
if len(submodule.split(""".""")) == 1:
submodules.append(lowerCamelCase)
return submodules
__magic_name__ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def lowerCamelCase ( ):
# This is to make sure the transformers module imported is the one in the repo.
A_ : Any = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(lowerCamelCase , """__init__.py""") , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A_ : Any = spec.loader.load_module()
A_ : int = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowerCamelCase) > 0:
A_ : int = """\n""".join(F'- {module}' for module in module_not_registered)
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
F'{list_of_modules}\n'
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCamelCase ( lowerCamelCase : NDArray[floataa] , lowerCamelCase : NDArray[floataa] , lowerCamelCase : list[int] , lowerCamelCase : int , ):
A_ , A_ : int = coefficient_matrix.shape
A_ , A_ : Union[str, Any] = constant_matrix.shape
if rowsa != colsa:
A_ : Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if colsa != 1:
A_ : Tuple = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if rowsa != rowsa:
A_ : Dict = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(lowerCamelCase)
if len(lowerCamelCase) != rowsa:
A_ : Union[str, Any] = (
"""Number of initial values must be equal to number of rows in coefficient """
F'matrix but received {len(lowerCamelCase)} and {rowsa}'
)
raise ValueError(lowerCamelCase)
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""")
A_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
A_ , A_ : int = table.shape
strictly_diagonally_dominant(lowerCamelCase)
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase):
A_ : List[Any] = []
for row in range(lowerCamelCase):
A_ : int = 0
for col in range(lowerCamelCase):
if col == row:
A_ : List[str] = table[row][col]
elif col == cols - 1:
A_ : str = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ : Union[str, Any] = (temp + val) / denom
new_val.append(lowerCamelCase)
A_ : Tuple = new_val
return [float(lowerCamelCase) for i in new_val]
def lowerCamelCase ( lowerCamelCase : NDArray[floataa]):
A_ , A_ : Dict = table.shape
A_ : Union[str, Any] = True
for i in range(0 , lowerCamelCase):
A_ : str = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
return is_diagonally_dominant
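# Minimal standalone Jacobi sketch, independent of the function above
# (hypothetical 2x2 system; numpy is already imported above as np). The
# matrix is strictly diagonally dominant, so the iteration converges to the
# exact solution x = 3, y = 5:
_A = np.array([[2.0, 1.0], [1.0, 3.0]])
_b = np.array([11.0, 18.0])
_x = np.zeros(2)
for _ in range(50):
    # x_i <- (b_i - sum_{j != i} A_ij * x_j) / A_ii
    _x = (_b - (_A - np.diag(np.diag(_A))) @ _x) / np.diag(_A)
assert np.allclose(_x, [3.0, 5.0], atol=1e-6)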
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __lowerCAmelCase :
'''simple docstring'''
@property
def _a ( self : Dict ):
'''simple docstring'''
return self.get_dummy_input()
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def _a ( self : Any ,_a : Optional[int]=True ,_a : List[str]=False ,_a : int=False ,_a : str=False ,):
'''simple docstring'''
A_ : str = 4
A_ : Tuple = 32
A_ : Union[str, Any] = (32, 32)
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Dict = torch.device(_a )
A_ : Optional[int] = (batch_size, num_channels) + sizes
A_ : List[str] = randn_tensor(_a ,generator=_a ,device=_a )
A_ : str = {"""hidden_states""": hidden_states}
if include_temb:
A_ : List[str] = 128
A_ : str = randn_tensor((batch_size, temb_channels) ,generator=_a ,device=_a )
if include_res_hidden_states_tuple:
A_ : List[str] = torch.manual_seed(1 )
A_ : Union[str, Any] = (randn_tensor(_a ,generator=_a ,device=_a ),)
if include_encoder_hidden_states:
A_ : List[Any] = floats_tensor((batch_size, 32, 32) ).to(_a )
if include_skip_sample:
A_ : Any = randn_tensor(((batch_size, 3) + sizes) ,generator=_a ,device=_a )
return dummy_input
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[int] = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
A_ : int = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a ( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ , A_ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = self.block_class(**_a )
unet_block.to(_a )
unet_block.eval()
with torch.no_grad():
A_ : Union[str, Any] = unet_block(**_a )
if isinstance(_a ,_a ):
A_ : Union[str, Any] = output[0]
self.assertEqual(output.shape ,self.output_shape )
A_ : Any = output[0, -1, -3:, -3:]
A_ : str = torch.tensor(_a ).to(_a )
assert torch_all_close(output_slice.flatten() ,_a ,atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" ,"""Training is not supported in mps""" )
def _a ( self : str ):
'''simple docstring'''
A_ , A_ : List[Any] = self.prepare_init_args_and_inputs_for_common()
A_ : str = self.block_class(**_a )
model.to(_a )
model.train()
A_ : int = model(**_a )
if isinstance(_a ,_a ):
A_ : str = output[0]
A_ : Dict = torch.device(_a )
A_ : List[str] = randn_tensor(output.shape ,device=_a )
A_ : Tuple = torch.nn.functional.mse_loss(_a ,_a )
loss.backward()
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Any = len(lowerCamelCase)
A_ : Optional[Any] = len(lowerCamelCase)
A_ : Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)]
A_ : Union[str, Any] = True
for i in range(lowerCamelCase):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A_ : Optional[int] = True
if a[i].islower():
A_ : List[Any] = True
return dp[n][m]
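# Worked example (classic abbreviation inputs): "daBcd" can be turned into
# "ABC" by capitalizing 'a' and 'c' and deleting both 'd's, so the answer is
# True; "dBcd" has no lowercase 'a' to capitalize, so for "ABC" it is False.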
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str ,*_a : Any ,**_a : str ):
'''simple docstring'''
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
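# Standalone 0-1 BFS sketch, independent of the class above (whose methods
# all share the obfuscated name `_a` in this dump, so the later definitions
# shadow the earlier ones). Same idea: 0-weight edges go to the front of the
# deque, 1-weight edges to the back, so vertices are popped in nondecreasing
# distance order. Uses the `deque` imported at the top of this file.
def _zero_one_bfs_sketch(adjacency: list[list[tuple[int, int]]], start: int) -> list[int | None]:
    distances: list[int | None] = [None] * len(adjacency)
    distances[start] = 0
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        for neighbor, weight in adjacency[vertex]:
            candidate = distances[vertex] + weight
            if distances[neighbor] is None or candidate < distances[neighbor]:
                distances[neighbor] = candidate
                (queue.appendleft if weight == 0 else queue.append)(neighbor)
    return distances


# Edges: 0 -> 1 (weight 0), 0 -> 2 (weight 1), 1 -> 2 (weight 1)
assert _zero_one_bfs_sketch([[(1, 0), (2, 1)], [(2, 1)], []], 0) == [0, 0, 1]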
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[Any] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
A_ : Optional[Any] = {
"""input_ids""": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] ,dtype=tf.intaa ), # "My dog is cute"
"""attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.intaa ),
}
A_ : int = model(_a )["""last_hidden_state"""]
A_ : List[str] = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape ,_a )
# compare the actual values for a slice.
A_ : str = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
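    # Sums the perimeters of "almost equilateral" triangles (integer sides
    # a, a, a +/- 1) with integral area, for perimeters up to the given
    # limit -- presumably Project Euler problem 94, whose limit is also
    # 10**9. The loop walks a linear recurrence over solutions instead of
    # testing every candidate; the first perimeters it generates are
    # 16, 50, 196, 722, i.e. the triangles (5, 5, 6), (17, 17, 16),
    # (65, 65, 66), (241, 241, 240).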
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : list):
A_ : Union[str, Any] = len(lowerCamelCase)
for _ in range(lowerCamelCase):
for i in range(_ % 2 , arr_size - 1 , 2):
if arr[i + 1] < arr[i]:
A_ , A_ : Union[str, Any] = arr[i + 1], arr[i]
return arr
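# Worked example: [3, 1, 2, 1] sorts to [1, 1, 2, 3] -- even-indexed pairs
# are compared on even passes, odd-indexed pairs on odd passes, so after at
# most len(arr) passes the list is ordered.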
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__magic_name__ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def lowerCamelCase ( lowerCamelCase : Optional[int]):
for pegasus_name, hf_name in PATTERNS:
A_ : Dict = k.replace(lowerCamelCase , lowerCamelCase)
return k
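# Illustration of the substitutions above on a hypothetical TF key:
#   "encoder/attention/output_proj/kernel" -> "encoder.attn.out_proj.weight"
# ('attention' -> 'attn', '/' -> '.', 'output_proj' -> 'out_proj',
# 'kernel' -> 'weight', applied in PATTERNS order).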
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : dict):
A_ : List[Any] = DEFAULTS.copy()
cfg_kwargs.update(lowerCamelCase)
A_ : str = PegasusConfig(**lowerCamelCase)
A_ : Union[str, Any] = PegasusForConditionalGeneration(lowerCamelCase)
A_ : Optional[int] = torch_model.model.state_dict()
A_ : List[str] = {}
for k, v in tf_weights.items():
A_ : Union[str, Any] = rename_state_dict_key(lowerCamelCase)
if new_k not in sd:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})')
if "dense" in k or "proj" in new_k:
A_ : Any = v.T
A_ : Any = torch.tensor(lowerCamelCase , dtype=sd[new_k].dtype)
assert v.shape == sd[new_k].shape, F'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
A_ : str = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1])
A_ : Union[str, Any] = mapping["""shared.weight"""]
A_ : Optional[Any] = mapping["""shared.weight"""]
A_ : Any = {k: torch.zeros_like(lowerCamelCase) for k, v in sd.items() if k.endswith("""bias""") and k not in mapping}
mapping.update(**lowerCamelCase)
A_ , A_ : Dict = torch_model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase)
A_ : Optional[Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def lowerCamelCase ( lowerCamelCase : str="./ckpt/aeslc/model.ckpt-32000"):
A_ : List[Any] = tf.train.list_variables(lowerCamelCase)
A_ : Optional[Any] = {}
A_ : List[str] = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(lowerCamelCase , desc="""converting tf checkpoint to dict"""):
A_ : Union[str, Any] = any(pat in name for pat in ignore_name)
if skip_key:
continue
A_ : Optional[int] = tf.train.load_variable(lowerCamelCase , lowerCamelCase)
A_ : str = array
return tf_weights
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
# save tokenizer first
A_ : List[Any] = Path(lowerCamelCase).parent.name
A_ : int = task_specific_params[F'summarization_{dataset}']["""max_position_embeddings"""]
A_ : int = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=lowerCamelCase)
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowerCamelCase)
# convert model
A_ : Tuple = get_tf_weights_as_numpy(lowerCamelCase)
A_ : Optional[int] = task_specific_params[F'summarization_{dataset}']
if dataset == "large":
A_ : Any = task_specific_params
A_ : Tuple = convert_pegasus(lowerCamelCase , lowerCamelCase)
torch_model.save_pretrained(lowerCamelCase)
A_ : List[Any] = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""")
sd.pop("""model.encoder.embed_positions.weight""")
torch.save(lowerCamelCase , Path(lowerCamelCase) / """pytorch_model.bin""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
__magic_name__ = parser.parse_args()
if args.save_dir is None:
__magic_name__ = Path(args.tf_ckpt_path).parent.name
__magic_name__ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
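    # The layout produced above is <s> A </s> for a single sequence and
    # <s> A </s></s> B </s> for a pair (the BART/RoBERTa double-separator
    # convention).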
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : str ,_a : Dict ,_a : List[str]=13 ,_a : List[str]=10 ,_a : Dict=3 ,_a : Any=2 ,_a : Optional[int]=2 ,_a : Any=True ,_a : int=True ,_a : str=32 ,_a : str=5 ,_a : Dict=4 ,_a : int=37 ,_a : Optional[Any]="gelu" ,_a : Optional[int]=0.1 ,_a : List[str]=0.1 ,_a : Optional[int]=10 ,_a : int=0.02 ,_a : Tuple="divided_space_time" ,_a : Any=None ,):
'''simple docstring'''
A_ : Dict = parent
A_ : Dict = batch_size
A_ : str = image_size
A_ : Dict = num_channels
A_ : int = patch_size
A_ : Any = num_frames
A_ : Any = is_training
A_ : Tuple = use_labels
A_ : List[Any] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[str] = intermediate_size
A_ : List[str] = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : Any = attention_type
A_ : Optional[Any] = initializer_range
A_ : str = scope
A_ : List[str] = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches per frame + 1 CLS token
A_ : Any = (image_size // patch_size) ** 2
A_ : Dict = (num_frames) * self.num_patches_per_frame + 1
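        # With the defaults above (image_size=10, patch_size=2, num_frames=2):
        # (10 // 2) ** 2 = 25 patches per frame, so seq_length = 2 * 25 + 1 = 51.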
def _a ( self : str ):
'''simple docstring'''
A_ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
A_ : List[Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
A_ : Tuple = self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = TimesformerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,)
A_ : int = self.num_labels
return config
def _a ( self : Any ,_a : str ,_a : int ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : List[str] = TimesformerModel(config=_a )
model.to(_a )
model.eval()
A_ : str = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : int ,_a : Any ,_a : str ,_a : List[str] ):
'''simple docstring'''
A_ : Tuple = TimesformerForVideoClassification(_a )
model.to(_a )
model.eval()
A_ : str = model(_a )
# verify the logits shape
A_ : Tuple = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape ,_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Tuple = self.prepare_config_and_inputs()
A_ , A_ , A_ : Tuple = config_and_inputs
A_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
a_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = TimesformerModelTester(self )
A_ : List[str] = ConfigTester(
self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def _a ( self : List[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple=False ):
'''simple docstring'''
A_ : List[Any] = copy.deepcopy(_a )
if return_labels:
if model_class in get_values(_a ):
A_ : Optional[Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def _a ( self : Any ):
'''simple docstring'''
pass
def _a ( self : str ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
A_ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def _a ( self : Any ):
'''simple docstring'''
A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_a )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : str = [*signature.parameters.keys()]
A_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TimesformerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _a ( self : int ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = True
for model_class in self.all_model_classes:
A_ : Tuple = self.model_tester.seq_length
A_ : List[Any] = self.model_tester.num_frames
A_ : Optional[Any] = True
A_ : str = False
A_ : Any = True
A_ : List[Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ : List[Any] = model(**self._prepare_for_class(_a ,_a ) )
A_ : int = outputs.attentions
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ : Dict = True
A_ : Optional[Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ : Optional[int] = model(**self._prepare_for_class(_a ,_a ) )
A_ : List[str] = outputs.attentions
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
A_ : int = len(_a )
# Check attention is always last and order is fine
A_ : Optional[int] = True
A_ : Any = True
A_ : Union[str, Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ : List[str] = model(**self._prepare_for_class(_a ,_a ) )
self.assertEqual(out_len + 1 ,len(_a ) )
A_ : Optional[Any] = outputs.attentions
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
def _a ( self : Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(_a : Optional[int] ,_a : Tuple ,_a : int ):
A_ : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(_a ,_a ) )
A_ : str = outputs.hidden_states
A_ : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_a ) ,_a )
A_ : Union[str, Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = True
check_hidden_states_output(_a ,_a ,_a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Optional[int] = True
check_hidden_states_output(_a ,_a ,_a )
def lowerCamelCase ( ):
A_ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""")
A_ : int = np.load(lowerCamelCase)
return list(lowerCamelCase)
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self : str ):
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
_a )
A_ : Optional[Any] = self.default_image_processor
A_ : Dict = prepare_video()
A_ : Dict = image_processor(video[:8] ,return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
A_ : Optional[int] = model(**_a )
# verify the logits
A_ : Optional[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,_a )
A_ : Dict = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1e-4 ) )
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
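# The _LazyModule assignment above is what keeps `import transformers` cheap:
# nothing from torch or tensorflow is loaded until the corresponding attribute
# is first accessed. A self-contained sketch of that deferred-import idea
# (hypothetical _LazyDemoModule, not the actual transformers implementation;
# "json" stands in for a heavy dependency):
import importlib
import types

class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        # Only reached when normal lookup fails, i.e. on first access.
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

_demo = _LazyDemoModule("demo", {"dumps": "json"})
assert _demo.dumps({"lazy": True}) == '{"lazy": true}'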
| 665 | 1 |
'''simple docstring'''
import os
import posixpath
import shutil # used below to move shards out of the working dir
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
a_ = None
def lowerCamelCase ( lowerCamelCase : "pyspark.sql.DataFrame" , lowerCamelCase : List[int] , ):
import pyspark
def generate_fn():
A_ : str = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id"""))
for partition_id in partition_order:
A_ : List[str] = df_with_partition_id.select("""*""").where(F'part_id = {partition_id}').drop("""part_id""")
A_ : Optional[int] = partition_df.collect()
A_ : Dict = 0
for row in rows:
yield F'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class __lowerCAmelCase ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__( self : int ,_a : "pyspark.sql.DataFrame" ,_a : Optional[int]=None ,):
'''simple docstring'''
A_ : Optional[Any] = df
A_ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
A_ : List[Any] = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self : List[Any] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def _a ( self : List[str] ,_a : np.random.Generator ):
'''simple docstring'''
A_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_a )
return SparkExamplesIterable(self.df ,partition_order=_a )
def _a ( self : Optional[int] ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Union[str, Any] = self.split_shard_indices_by_worker(_a ,_a )
return SparkExamplesIterable(self.df ,partition_order=_a )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.partition_order )
class __lowerCAmelCase ( datasets.DatasetBuilder ):
'''simple docstring'''
a_ = SparkConfig
def __init__( self : List[str] ,_a : "pyspark.sql.DataFrame" ,_a : str = None ,_a : str = None ,**_a : int ,):
'''simple docstring'''
import pyspark
A_ : Union[str, Any] = pyspark.sql.SparkSession.builder.getOrCreate()
A_ : Optional[Any] = df
A_ : Union[str, Any] = working_dir
super().__init__(
cache_dir=_a ,config_name=str(self.df.semanticHash() ) ,**_a ,)
def _a ( self : Union[str, Any] ):
'''simple docstring'''
def create_cache_and_write_probe(_a : Dict ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=_a )
A_ : Tuple = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_a ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
A_ : Any = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(_a ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Optional[Any] ,_a : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _a ( self : List[Any] ,_a : List[str] ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(_a : int ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
A_ : Union[str, Any] = self.df.count()
A_ : int = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
A_ : str = (
self.df.limit(_a )
.repartition(1 )
.mapInArrow(_a ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
A_ : Tuple = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
A_ : int = min(_a ,int(approx_total_size / max_shard_size ) )
A_ : Any = self.df.repartition(_a )
def _a ( self : List[str] ,_a : str ,_a : str ,_a : int ,):
'''simple docstring'''
import pyspark
A_ : str = ParquetWriter if file_format == """parquet""" else ArrowWriter
A_ : Any = os.path.join(self._working_dir ,os.path.basename(_a ) ) if self._working_dir else fpath
A_ : Tuple = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
A_ : int = self.config.features
A_ : Optional[Any] = self._writer_batch_size
A_ : List[str] = self._fs.storage_options
def write_arrow(_a : Any ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
A_ : Union[str, Any] = pyspark.TaskContext().taskAttemptId()
A_ : Any = next(_a ,_a )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
A_ : List[Any] = 0
A_ : List[str] = writer_class(
features=_a ,path=working_fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,writer_batch_size=_a ,storage_options=_a ,embed_local_files=_a ,)
A_ : Union[str, Any] = pa.Table.from_batches([first_batch] )
writer.write_table(_a )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
A_ , A_ : List[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
A_ : Optional[Any] = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,writer_batch_size=_a ,storage_options=_a ,embed_local_files=_a ,)
A_ : str = pa.Table.from_batches([batch] )
writer.write_table(_a )
if writer._num_bytes > 0:
A_ , A_ : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_a ) ):
A_ : int = os.path.join(os.path.dirname(_a ) ,os.path.basename(_a ) )
shutil.move(_a ,_a )
A_ : List[Any] = (
self.df.mapInArrow(_a ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _a ( self : Optional[Any] ,_a : "datasets.SplitGenerator" ,_a : str = "arrow" ,_a : Optional[Union[str, int]] = None ,_a : Optional[int] = None ,**_a : int ,):
'''simple docstring'''
self._validate_cache_dir()
A_ : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_a )
A_ : List[Any] = not is_remote_filesystem(self._fs )
A_ : List[Any] = os.path.join if is_local else posixpath.join
A_ : List[Any] = """-TTTTT-SSSSS-of-NNNNN"""
A_ : Any = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
A_ : Optional[Any] = path_join(self._output_dir ,_a )
A_ : Any = 0
A_ : Dict = 0
A_ : Dict = 0
A_ : int = []
A_ : List[Any] = []
for task_id, content in self._prepare_split_single(_a ,_a ,_a ):
A_ , A_ , A_ , A_ : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_a )
A_ : Optional[Any] = total_num_examples
A_ : Optional[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f'Renaming {total_shards} shards.' )
if total_shards > 1:
A_ : Any = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
A_ : Optional[int] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_a : int ,_a : int ,_a : int ,):
rename(
_a ,fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,fpath.replace("""TTTTT-SSSSS""" ,f'{global_shard_id:05d}' ).replace("""NNNNN""" ,f'{total_shards:05d}' ) ,)
A_ : List[str] = []
A_ : int = 0
for i in range(len(_a ) ):
A_ , A_ : str = task_id_and_num_shards[i]
for shard_id in range(_a ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_a ,len(_a ) ).map(lambda _a : _rename_shard(*_a ) ).collect()
else:
# don't use any pattern
A_ : List[str] = 0
A_ : Tuple = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,fpath.replace(_a ,"""""" ) ,)
def _a ( self : Union[str, Any] ,_a : "datasets.SplitGenerator" ,):
'''simple docstring'''
return SparkExamplesIterable(self.df )
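# Typical entry point for the builder above: datasets.Dataset.from_spark hands
# a pyspark DataFrame to this Spark-backed builder so the Arrow conversion runs
# on the executors rather than the driver. A minimal sketch, assuming pyspark
# is installed and a local session suffices (column name is illustrative):
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
spark_df = spark.createDataFrame([("hello",), ("world",)], schema="text string")
dataset = Dataset.from_spark(spark_df)
print(dataset[0])  # {'text': 'hello'}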
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : str = [0] * len(lowerCamelCase)
A_ : Union[str, Any] = []
A_ : Union[str, Any] = []
A_ : Tuple = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCamelCase)):
if indegree[i] == 0:
queue.append(lowerCamelCase)
while queue:
A_ : Any = queue.pop(0)
cnt += 1
topo.append(lowerCamelCase)
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCamelCase)
if cnt != len(lowerCamelCase):
print("""Cycle exists""")
else:
print(lowerCamelCase)
# Adjacency List of Graph
__magic_name__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
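# For the adjacency list above, Kahn's algorithm starts from vertex 0 (the only
# vertex with indegree 0) and prints [0, 1, 2, 3, 4, 5]. A cycle leaves every
# vertex on it with a positive indegree, so the queue drains early and the
# count check fires:
topological_sort({0: [1], 1: [0]}) # prints "Cycle exists"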
| 665 | 1 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__magic_name__ = 0B1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__magic_name__ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = WATERMARK_BITS
A_ : List[str] = WatermarkEncoder()
self.encoder.set_watermark("""bits""" ,self.watermark )
def _a ( self : Any ,_a : torch.FloatTensor ):
'''simple docstring'''
if images.shape[-1] < 256:
return images
A_ : Union[str, Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
A_ : Union[str, Any] = [self.encoder.encode(_a ,"""dwtDct""" ) for image in images]
A_ : Union[str, Any] = torch.from_numpy(np.array(_a ) ).permute(0 ,3 ,1 ,2 )
A_ : Tuple = torch.clamp(2 * (images / 255 - 0.5) ,min=-1.0 ,max=1.0 )
return images
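# Round-trip sketch of the underlying invisible-watermark API used above:
# embed the 48-bit message in a single BGR uint8 image and read it back.
# WatermarkDecoder and its ("bits", length) constructor are assumptions about
# the imwatermark package's decode side; the image content here is random
# noise, so recovery is best-effort.
from imwatermark import WatermarkDecoder

bgr = (np.random.rand(512, 512, 3) * 255).astype(np.uint8)
encoder = WatermarkEncoder()
encoder.set_watermark("bits", WATERMARK_BITS)
bgr_marked = encoder.encode(bgr, "dwtDct")

decoder = WatermarkDecoder("bits", len(WATERMARK_BITS))
print(decoder.decode(bgr_marked, "dwtDct"))  # recovered bit sequence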
| 665 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and attention mask
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ , A_ , A_ , A_ : Any = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
__magic_name__ = TypeVar('_T')
class __lowerCAmelCase ( Generic[_T] ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Iterable[_T] | None = None ):
'''simple docstring'''
A_ : list[_T] = list(iterable or [] )
A_ : list[_T] = []
def __len__( self : int ):
'''simple docstring'''
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Any ):
'''simple docstring'''
return f'Queue({tuple(self._stacka[::-1] + self._stacka )})'
def _a ( self : List[Any] ,_a : _T ):
'''simple docstring'''
self._stacka.append(_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = self._stacka.pop
A_ : Optional[int] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
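# The class above is a FIFO queue built from two LIFO stacks: put() appends to
# an ingress stack, get() pops from an egress stack and refills it (reversing
# order) only when it runs empty, so each element moves at most once and
# dequeue is amortized O(1). A self-contained sketch with hypothetical names,
# since the dump masks the method names above:
class TwoStackQueue:
    def __init__(self):
        self._ingress, self._egress = [], []

    def put(self, item):
        self._ingress.append(item)

    def get(self):
        if not self._egress:
            while self._ingress:
                self._egress.append(self._ingress.pop())
        if not self._egress:
            raise IndexError("Queue is empty")
        return self._egress.pop()

q = TwoStackQueue()
for x in (1, 2, 3):
    q.put(x)
print(q.get(), q.get(), q.get())  # 1 2 3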
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = 0.0
for i, j in zip(_a ,_a ):
n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0
A_ : List[str] = n_correct / len(_a )
return {
"accuracy": accuracy,
}
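# The metric above reduces to math_equivalence.is_equiv from the
# hendrycks/math repository, which canonicalizes the LaTeX before comparing
# strings; mirroring the docstring example:
print(math_equivalence.is_equiv("1/2", "\\frac{1}{2}"))  # True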
| 665 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = 42
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = 32
a_ = 4
a_ = 4
a_ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
a_ = False
a_ = (320, 640, 1_280, 1_280)
a_ = 2
a_ = 8
a_ = None
a_ = 1_280
a_ = 0.0
a_ = False
a_ = jnp.floataa
a_ = True
a_ = 0
a_ = False
def _a ( self : List[str] ,_a : jax.random.KeyArray ):
'''simple docstring'''
A_ : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size)
A_ : Optional[Any] = jnp.zeros(_a ,dtype=jnp.floataa )
A_ : Union[str, Any] = jnp.ones((1,) ,dtype=jnp.intaa )
A_ : str = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
A_ , A_ : Optional[Any] = jax.random.split(_a )
A_ : Optional[Any] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(_a ,_a ,_a ,_a )["params"]
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Dict = self.block_out_channels
A_ : str = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A_ : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
A_ : int = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
A_ : Any = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
A_ : List[str] = FlaxTimestepEmbedding(_a ,dtype=self.dtype )
A_ : Optional[int] = self.only_cross_attention
if isinstance(_a ,_a ):
A_ : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_a ,_a ):
A_ : List[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
A_ : Optional[Any] = []
A_ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
A_ : Tuple = output_channel
A_ : List[Any] = block_out_channels[i]
A_ : Optional[int] = i == len(_a ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A_ : Tuple = FlaxCrossAttnDownBlockaD(
in_channels=_a ,out_channels=_a ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
else:
A_ : str = FlaxDownBlockaD(
in_channels=_a ,out_channels=_a ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(_a )
A_ : Union[str, Any] = down_blocks
# mid
A_ : Optional[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
# up
A_ : Dict = []
A_ : List[str] = list(reversed(_a ) )
A_ : Any = list(reversed(_a ) )
A_ : Union[str, Any] = list(reversed(_a ) )
A_ : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
A_ : Dict = output_channel
A_ : Optional[int] = reversed_block_out_channels[i]
A_ : Optional[int] = reversed_block_out_channels[min(i + 1 ,len(_a ) - 1 )]
A_ : Dict = i == len(_a ) - 1
if up_block_type == "CrossAttnUpBlock2D":
A_ : Any = FlaxCrossAttnUpBlockaD(
in_channels=_a ,out_channels=_a ,prev_output_channel=_a ,num_layers=self.layers_per_block + 1 ,num_attention_heads=reversed_num_attention_heads[i] ,add_upsample=not is_final_block ,dropout=self.dropout ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
else:
A_ : Dict = FlaxUpBlockaD(
in_channels=_a ,out_channels=_a ,prev_output_channel=_a ,num_layers=self.layers_per_block + 1 ,add_upsample=not is_final_block ,dropout=self.dropout ,dtype=self.dtype ,)
up_blocks.append(_a )
A_ : int = output_channel
A_ : Dict = up_blocks
# out
A_ : int = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 )
A_ : str = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : str ,_a : Tuple ,_a : List[str] ,_a : Optional[int] ,_a : Optional[Any]=None ,_a : Union[str, Any]=None ,_a : bool = True ,_a : bool = False ,):
'''simple docstring'''
if not isinstance(_a ,jnp.ndarray ):
A_ : Any = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(_a ,jnp.ndarray ) and len(timesteps.shape ) == 0:
A_ : int = timesteps.astype(dtype=jnp.floataa )
A_ : List[str] = jnp.expand_dims(_a ,0 )
A_ : int = self.time_proj(_a )
A_ : Union[str, Any] = self.time_embedding(_a )
# 2. pre-process
A_ : str = jnp.transpose(_a ,(0, 2, 3, 1) )
A_ : List[str] = self.conv_in(_a )
# 3. down
A_ : Tuple = (sample,)
for down_block in self.down_blocks:
if isinstance(_a ,_a ):
A_ , A_ : Union[str, Any] = down_block(_a ,_a ,_a ,deterministic=not train )
else:
A_ , A_ : str = down_block(_a ,_a ,deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
A_ : List[Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
_a ,_a ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
A_ : Union[str, Any] = new_down_block_res_samples
# 4. mid
A_ : List[Any] = self.mid_block(_a ,_a ,_a ,deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
A_ : Union[str, Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
A_ : Any = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_a ,_a ):
A_ : List[str] = up_block(
_a ,temb=_a ,encoder_hidden_states=_a ,res_hidden_states_tuple=_a ,deterministic=not train ,)
else:
A_ : Optional[int] = up_block(_a ,temb=_a ,res_hidden_states_tuple=_a ,deterministic=not train )
# 6. post-process
A_ : Optional[Any] = self.conv_norm_out(_a )
A_ : Tuple = nn.silu(_a )
A_ : Optional[Any] = self.conv_out(_a )
A_ : List[str] = jnp.transpose(_a ,(0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_a )
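# Driving this UNet end to end (the class is FlaxUNet2DConditionModel in
# diffusers): init_weights builds the parameter tree, then apply runs a noisy
# latent, a timestep, and encoder hidden states through it. Shapes follow the
# config defaults above (sample_size=32, in_channels=4,
# cross_attention_dim=1280); a sketch assuming jax, flax and diffusers are
# installed.
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(sample_size=32)
params = unet.init_weights(jax.random.PRNGKey(0))
sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.sample.shape)  # (1, 4, 32, 32)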
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
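# Instantiation sketch (the class above is RetriBertConfig in transformers):
# the defaults describe a BERT-base-style encoder whose pooled output is
# projected down to `projection_dim` for dense retrieval; any field can be
# overridden by keyword. Assumes transformers still exposes the deprecated
# RetriBERT classes at the top level.
from transformers import RetriBertConfig

config = RetriBertConfig(projection_dim=256)
print(config.hidden_size, config.projection_dim)  # 768 256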
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__magic_name__ = TypeVar('T')
class __lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : int ,_a : list[T] ,_a : Callable[[T, T], T] ):
'''simple docstring'''
A_ : Any | T = None
A_ : int = len(_a )
A_ : list[T] = [any_type for _ in range(self.N )] + arr
A_ : Union[str, Any] = fnc
self.build()
def _a ( self : List[str] ):
'''simple docstring'''
for p in range(self.N - 1 ,0 ,-1 ):
A_ : str = self.fn(self.st[p * 2] ,self.st[p * 2 + 1] )
def _a ( self : Optional[int] ,_a : int ,_a : T ):
'''simple docstring'''
p += self.N
A_ : Union[str, Any] = v
while p > 1:
A_ : Optional[Any] = p // 2
A_ : Union[str, Any] = self.fn(self.st[p * 2] ,self.st[p * 2 + 1] )
def _a ( self : List[str] ,_a : int ,_a : int ): # noqa: E741
'''simple docstring'''
A_ , A_ : Optional[int] = l + self.N, r + self.N
A_ : T | None = None
while l <= r:
if l % 2 == 1:
A_ : Union[str, Any] = self.st[l] if res is None else self.fn(_a ,self.st[l] )
if r % 2 == 0:
A_ : Tuple = self.st[r] if res is None else self.fn(_a ,self.st[r] )
A_ , A_ : Optional[int] = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
__magic_name__ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__magic_name__ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__magic_name__ = SegmentTree(test_array, min)
__magic_name__ = SegmentTree(test_array, max)
__magic_name__ = SegmentTree(test_array, lambda a, b: a + b)
def lowerCamelCase ( ):
for i in range(len(lowerCamelCase)):
for j in range(lowerCamelCase , len(lowerCamelCase)):
A_ : Optional[int] = reduce(lowerCamelCase , test_array[i : j + 1])
A_ : int = reduce(lowerCamelCase , test_array[i : j + 1])
A_ : str = reduce(lambda lowerCamelCase , lowerCamelCase: a + b , test_array[i : j + 1])
assert min_range == min_segment_tree.query(lowerCamelCase , lowerCamelCase)
assert max_range == max_segment_tree.query(lowerCamelCase , lowerCamelCase)
assert sum_range == sum_segment_tree.query(lowerCamelCase , lowerCamelCase)
test_all_segments()
for index, value in test_updates.items():
__magic_name__ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
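# Point queries after the updates above, as a quick spot check: the updated
# array is [7, 2, 6, -14, 5, 4, 7, -10, 9, 10, 12, 1], so the minimum over
# indices 3..7 is -14 and the sum over indices 0..3 is 7 + 2 + 6 - 14 = 1.
print(min_segment_tree.query(3, 7)) # -14
print(sum_segment_tree.query(0, 3)) # 1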
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behaves like a normal word, i.e. includes the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
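# A minimal sketch of the special-token layout the methods above implement
# (the class is BigBirdTokenizer in transformers): a pair is rendered as
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment and 1
# over the second. Assumes network access to fetch the sentencepiece model.
from transformers import BigBirdTokenizer

tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
enc = tok("premise", "hypothesis")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # ['[CLS]', ..., '[SEP]', ..., '[SEP]']
print(enc["token_type_ids"])  # 0s then 1s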
| 665 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict ,_a : Optional[int] ,_a : List[Any]=7 ,_a : List[Any]=3 ,_a : Optional[Any]=18 ,_a : List[Any]=30 ,_a : Dict=400 ,_a : Any=True ,_a : int=None ,_a : Union[str, Any]=True ,_a : Optional[Any]=None ,_a : str=True ,):
'''simple docstring'''
A_ : Optional[int] = size if size is not None else {"""shortest_edge""": 20}
A_ : List[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ : List[str] = parent
A_ : List[Any] = batch_size
A_ : Optional[int] = num_channels
A_ : Union[str, Any] = image_size
A_ : str = min_resolution
A_ : Optional[int] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : int = size
A_ : Any = do_center_crop
A_ : List[Any] = crop_size
A_ : Dict = do_flip_channel_order
def _a ( self : List[str] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = MobileViTImageProcessor if is_vision_available() else None
def _a ( self : Dict ):
'''simple docstring'''
A_ : int = MobileViTImageProcessingTester(self )
@property
def _a ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a ,"""do_resize""" ) )
self.assertTrue(hasattr(_a ,"""size""" ) )
self.assertTrue(hasattr(_a ,"""do_center_crop""" ) )
self.assertTrue(hasattr(_a ,"""center_crop""" ) )
self.assertTrue(hasattr(_a ,"""do_flip_channel_order""" ) )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
A_ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def _a ( self : Tuple ):
'''simple docstring'''
pass
def _a ( self : Dict ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a ,Image.Image )
# Test not batched input
A_ : Dict = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
A_ : Union[str, Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,np.ndarray )
# Test not batched input
A_ : Dict = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
A_ : Tuple = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
A_ : Union[str, Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
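# Illustrative sketch (not part of the test suite): the `do_flip_channel_order`
# option exercised above reverses the channel axis of a channels-last image,
# e.g. RGB -> BGR. A minimal numpy equivalent:
import numpy as np

rgb = np.random.randint(0, 256, size=(18, 18, 3), dtype=np.uint8)
bgr = rgb[..., ::-1]  # flip the channel order along the last axis
assert (bgr[..., 0] == rgb[..., 2]).all()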
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
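# Minimal usage sketch mirroring the tests above; the model is the same tiny
# random checkpoint the tests load, so the generated text is not meaningful:
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))  # [{'generated_text': ...}]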
| 665 | 1 |
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000):
    # Project Euler 71: numerator of the reduced proper fraction immediately
    # to the left of numerator/denominator among fractions with denominators
    # up to `limit`.
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        # step back when the fraction would equal numerator/denominator exactly
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
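    # Sanity check from the problem statement: for denominators up to 8 the
    # fraction immediately to the left of 3/7 is 2/5, so the answer is 2.
    assert solution(3, 7, 8) == 2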
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
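# Usage sketch (standard PretrainedConfig behaviour, assuming a transformers
# release that ships GPT-BigCode): the attribute_map above lets the canonical
# names resolve to the GPT-2 style ones.
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12)
assert config.hidden_size == config.n_embd == 768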
| 665 | 1 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ):
'''simple docstring'''
A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
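# Readable-name restatement of the static spherical-interpolation ("slerp")
# helper above, useful for smoothly interpolating between two noise tensors:
import torch
from math import acos, sin

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)

mix = slerp(torch.randn(64), torch.randn(64), 0.5)  # halfway interpolation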
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase ( ):
A_ : Union[str, Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
A_ : Optional[Any] = bs[:]
A_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
A_ : List[Any] = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int):
A_ : int = set()
A_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : List[str] = char
return pairs
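# Self-contained illustration (readable names) of what `get_pairs` computes:
# the set of adjacent symbol pairs from which the next BPE merge is chosen.
def _get_pairs_demo(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert _get_pairs_demo(tuple("low")) == {("l", "o"), ("o", "w")}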
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
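# Shape of the outputs above for a sequence pair, written out with toy ids
# (RoBERTa/Longformer layout: <s> A </s> </s> B </s>; 0 and 2 stand in for
# the <s>/</s> ids):
a_ids, b_ids = [10, 11], [20]
input_ids = [0] + a_ids + [2, 2] + b_ids + [2]  # build_inputs_with_special_tokens
special_mask = [1] + [0] * len(a_ids) + [1, 1] + [0] * len(b_ids) + [1]  # get_special_tokens_mask
assert len(input_ids) == len(special_mask) == 7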
| 665 | 1 |
'''simple docstring'''
import os
def solution():
    # Project Euler 22: total of all name scores in p022_names.txt, where a
    # name score is (alphabetical rank of the name) * (sum of letter values).
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
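    # Worked example from the problem statement: COLIN is worth
    # 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name it scores 938 * 53 = 49714.
    assert sum(ord(c) - 64 for c in "COLIN") == 53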
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
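# Token-type ids from `create_token_type_ids_from_sequences` above, spelled
# out for a toy pair (BERT layout [CLS] A [SEP] B [SEP]; ids are hypothetical):
cls, sep = [101], [102]
a_ids, b_ids = [7, 8, 9], [15, 16]
token_type_ids = len(cls + a_ids + sep) * [0] + len(b_ids + sep) * [1]
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]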
| 665 | 1 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring via Manacher's algorithm (O(n)).

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_input_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # l and r store the start and end of the previous furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_input_string find the corresponding palindrome
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
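    # Brute-force cross-check (O(n^3), fine for short inputs) of the O(n)
    # Manacher implementation above:
    def _longest_palindrome_bruteforce(s: str) -> str:
        best = ""
        for i in range(len(s)):
            for j in range(i, len(s)):
                sub = s[i : j + 1]
                if sub == sub[::-1] and len(sub) > len(best):
                    best = sub
        return best

    assert palindromic_string("abbbaba") == _longest_palindrome_bruteforce("abbbaba") == "abbba"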
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
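# Layout produced by `build_inputs_with_special_tokens` above for a toy pair
# (BART follows the RoBERTa scheme <s> A </s> </s> B </s>; bos/eos ids 0 and 2
# match the BART vocab, the other ids are hypothetical):
bos, eos = [0], [2]
a_ids, b_ids = [31414], [232]
assert bos + a_ids + eos + eos + b_ids + eos == [0, 31414, 2, 2, 232, 2]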
| 665 | 1 |