from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid for this vertex if no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
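

# A minimal usage sketch (hypothetical adjacency matrix, not part of the original
# module): color a triangle plus a pendant vertex with at most 3 colors.
if __name__ == "__main__":
    adjacency_matrix = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [0, 0, 1, 0],
    ]
    print(color(adjacency_matrix, 3))  # [0, 1, 2, 0]; an empty list means no valid coloring exists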
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` independent
    trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Build the tree
    #     1
    #    / \
    #   2   3
    #  / \
    # 4   5
    # used by the demo in main() below.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> Sequence[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> Sequence[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> Sequence[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Any]:
    # Breadth-first traversal using a queue.
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Any] | list[Any]:
    # Alternate the direction level by level.
    if root is None:
        return []

    output: list[Sequence[Any]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
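
# A minimal smoke-test sketch (hypothetical shapes; assumes the default CLIPConfig
# vision settings with 224x224 inputs, and the class name as reconstructed above).
# The module's relative imports mean this would run from a separate script rather
# than from this file directly:
#
#   checker = IFSafetyChecker(CLIPConfig())
#   clip_input = torch.randn(2, 3, 224, 224)   # CLIP-preprocessed image batch
#   images = np.random.rand(2, 64, 64, 3)      # generated images to screen
#   images, nsfw_detected, watermark_detected = checker(clip_input, images)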
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowerCAmelCase_: str = parser.parse_args()
lowerCAmelCase_: Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
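
# Usage sketch (script and output names are placeholders):
#   python convert_m2m100.py /path/to/fairseq/model.pt ./m2m100-converted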
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of one another: completing
        # [1, 2, 3] would also complete [1, 2], so such inputs are rejected at init time.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
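
# A minimal usage sketch (not part of the original tests): a DisjunctiveConstraint is
# typically passed to `model.generate` through its `constraints` argument together
# with beam search, e.g.
#
#   word_ids = tokenizer(["Hello", "Hi"], add_special_tokens=False).input_ids
#   model.generate(input_ids, constraints=[DisjunctiveConstraint(word_ids)], num_beams=4)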
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
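
# A minimal usage sketch (assumes the public "CIDAS/clipseg-rd64-refined" checkpoint;
# the module's relative imports mean this runs from a separate script):
#
#   from transformers import CLIPSegProcessor
#   from PIL import Image
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=[Image.open("cat.png")], return_tensors="pt")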
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
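
# Usage sketch (paths are placeholders; the flags come from packer_cli above):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 128 \
#       --data_dir ./wmt_en_ro --save_path ./wmt_en_ro_packed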
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCAmelCase (__A , __A="shi-labs/oneformer_demo"):
"""simple docstring"""
with open(hf_hub_download(__A , __A , repo_type='''dataset''') , '''r''') as f:
_a = json.load(__A)
_a = {}
_a = []
_a = []
for key, info in class_info.items():
_a = info["name"]
class_names.append(info['''name'''])
if info["isthing"]:
thing_ids.append(int(__A))
_a = thing_ids
_a = class_names
return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to OneFormerImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
def a__ (self , A=False , A=False , A="np" ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_a = self.image_processing_tester.num_labels
_a = None
_a = None
_a = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
if with_segmentation_maps:
_a = num_labels
if is_instance_map:
_a = list(range(lowerCAmelCase__ ) ) * 2
_a = dict(enumerate(lowerCAmelCase__ ) )
_a = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_a = [Image.fromarray(lowerCAmelCase__ ) for annotation in annotations]
_a = image_processor(
lowerCAmelCase__ , ['''semantic'''] * len(lowerCAmelCase__ ) , lowerCAmelCase__ , return_tensors='''pt''' , instance_id_to_semantic_id=lowerCAmelCase__ , pad_and_return_pixel_mask=lowerCAmelCase__ , )
return inputs
def a__ (self ) -> List[Any]:
"""simple docstring"""
pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # Transfer the weights of `self.src` to `self.dest` by performing a forward pass
        # using `x` as input: layers are traced in execution order and copied one-to-one.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of an array of numbers with the
    lowest possible sum and return that sum.
    >>> min_path_sum([
    ...     [1, 3, 1],
    ...     [1, 5, 1],
    ...     [4, 2, 1],
    ... ])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed so that every node holds exactly one
    coin, where a move transfers a single coin between adjacent nodes."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
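    # A minimal usage sketch (hypothetical tree, not part of the original file):
    # the root holds 3 coins and both leaves hold 0, so two moves are needed.
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coins(example_tree))  # 2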
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    '''simple docstring'''
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    '''simple docstring'''
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    '''simple docstring'''
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
| 700
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ))
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script", type=str, help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ))
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    '''simple docstring'''
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 15
| 0
|
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """simple docstring"""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """simple docstring"""

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """simple docstring"""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """simple docstring"""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """simple docstring"""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """simple docstring"""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f'''determinant modular {req_l} of encryption key({det}) '''
                f'''is not co prime w.r.t {req_l}.\nTry another key.'''
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """simple docstring"""
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """simple docstring"""
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """simple docstring"""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """simple docstring"""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    """simple docstring"""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
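# Non-interactive sketch (an editorial addition): the 2x2 key below is an
# arbitrary choice whose determinant, 7, is coprime with 36, so it passes
# check_determinant. The expected ciphertext comes from the classic worked
# example for this cipher and should be treated as indicative, not definitive.
demo_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
print(demo_cipher.encrypt("testing hill cipher"))  # e.g. 'WHXYJOLM9C6XT085LL'
print(demo_cipher.decrypt("WHXYJOLM9C6XT085LL"))  # e.g. 'TESTINGHILLCIPHERR'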
| 470
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """simple docstring"""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """simple docstring"""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """simple docstring"""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """simple docstring"""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 470
| 1
|
"""simple docstring"""
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("""String must only contain alphabetic characters.""")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input('''Enter a string ''').strip()
    isogram = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 719
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
feature_extractor = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        """HTSAT-tiny""",
        """roberta""",
        checkpoint_path,
        precision="""fp32""",
        device="""cuda:0""" if torch.cuda.is_available() else """cpu""",
        enable_fusion=enable_fusion,
        fusion_type="""aff_2d""" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(F"""sequential.{sequential_layer}.""", F"""layers.{int(sequential_layer)//3}.linear.""")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F"""_projection.{projecton_layer}.""", F"""_projection.linear{transformers_projection_layer}.""")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 635
| 0
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('''GET''', '''https://huggingface.co''')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('''GET''', '''https://huggingface.co''', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('''GET''', '''https://huggingface.co''')


def test_offline_with_datasets_offline_mode_enabled():
    '''simple docstring'''
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('''https://huggingface.co''')
| 393
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    '''simple docstring'''
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if resistance < 0:
        raise ValueError('''Resistance cannot be negative''')
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('''Exactly one argument must be 0''')
if __name__ == "__main__":
import doctest
doctest.testmod()
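# Worked example (an editorial addition): with current = 5 A through a
# resistance of 2 ohm, the missing quantity follows from V = I * R.
assert ohms_law(voltage=0, current=5, resistance=2) == {"voltage": 10.0}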
| 393
| 1
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.')
    parser.add_argument(
        '--dataset_name', type=str, default='wikitext',
        help='Name of the training. Explore datasets at: hf.co/datasets.')
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1',
        help='Configuration name of the dataset.')
    parser.add_argument(
        '--tokenizer_name_or_path', type=str, default='sayakpaul/unigram-tokenizer-wikitext',
        help='Tokenizer identifier. Can be a local filepath or a Hub identifier.')
    parser.add_argument(
        '--shard_size', type=int, default=1000,
        help='Number of entries to go in a single shard.')
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'])
    parser.add_argument(
        '--limit', default=None, type=int,
        help='Limit the number of shards (used for debugging).')
    parser.add_argument(
        '--max_length', type=int, default=512,
        help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.')
    parser.add_argument(
        '--output_dir', default='tf-tpu', type=str,
        help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.')
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """simple docstring"""
    def fn(examples):
        return tokenizer(examples['text'])

    return fn


def get_serialized_examples(tokenized_data):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data['input_ids'])):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i])),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f'''Limiting the dataset to {args.limit} entries.''')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'])
        filename = os.path.join(split_dir, f'''dataset-{shard_count}-{records_containing}.tfrecord''')
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('Wrote file {} containing {} records'.format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f'''split-{args.split}-records-count.txt''', 'w') as f:
        print(f'''Total {args.split} records: {total_records}''', file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 704
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein['aatype'].device)
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein['aatype'].device)
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein['aatype'].device)
    protein_aatype = protein['aatype'].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein['atom14_atom_exists'] = residx_atom14_mask
    protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein['aatype'].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein['atom37_atom_exists'] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n, device=batch['aatype'].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 585
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("""test""")
    else:
        parser = argparse.ArgumentParser("""Accelerate test command""")

    parser.add_argument(
        """--config_file""", default=None, help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ))

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["""test_utils""", """scripts""", """test_script.py"""])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'''--config_file={args.config_file} {script_name}'''

    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""")


def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 68
|
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    '''simple docstring'''
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    '''simple docstring'''

    @staticmethod
    def register_subcommand(parser) -> None:
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)

    def run(self) -> dict:
        hub_version = huggingface_hub.__version__

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 251
| 0
|
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int(''.join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
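# Illustrative checks (an editorial addition): removing one digit from 152 can
# at best leave 52, and removing the 2 from 2736 leaves 736.
assert remove_digit(152) == 52
assert remove_digit(2736) == 736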
| 52
|
def is_even(number: int) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
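# The check above relies on the least-significant bit, which is 0 exactly for
# even integers. A couple of illustrative assertions (an editorial addition):
assert is_even(10)
assert not is_even(7)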
| 52
| 1
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        """simple docstring"""
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        """simple docstring"""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        """simple docstring"""
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """simple docstring"""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                F"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                F" destination module has {len(dest_traced)}.")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(F"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    def __init__(self, model: nn.Module):
        """simple docstring"""
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), F"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((F"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        """simple docstring"""
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks)
class NameToFromModelFuncMap(dict):
    def convert_name_to_timm(self, x: str) -> str:
        """simple docstring"""
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str):
        """simple docstring"""
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str):
        """simple docstring"""
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """simple docstring"""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """simple docstring"""
    print(F"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True)

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True)

        print(F"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """simple docstring"""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 421
|
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b

    return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
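# Sanity checks for the predicate (an editorial addition): the pentagonal
# numbers start 1, 5, 12, 22, 35, ... so 22 passes and 23 does not.
assert is_pentagonal(22)
assert not is_pentagonal(23)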
| 421
| 1
|
"""simple docstring"""
def temp_input_value(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    '''simple docstring'''
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    '''simple docstring'''
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    '''simple docstring'''
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher

    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(F'''guess the number : {last_numbers[-1]}''')
    print(F'''details : {last_numbers!s}''')


def main() -> None:
    '''simple docstring'''
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())

    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
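# Non-interactive sketch (an editorial addition): searching for 42 between 0
# and 100 repeatedly halves the interval via get_avg and prints the midpoints
# tried (50, 25, 37, 43, ...), ending when the midpoint equals the target.
guess_the_number(0, 100, 42)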
| 709
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
"""simple docstring"""
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 135
| 0
|
'''simple docstring'''
def check_bouncy(n: int) -> bool:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
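# Illustrative checks (an editorial addition): 101 both falls (1 -> 0) and
# rises (0 -> 1), so it is bouncy; 123 is strictly increasing and 321 strictly
# decreasing, so neither is bouncy.
assert check_bouncy(101)
assert not check_bouncy(123)
assert not check_bouncy(321)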
| 56
|
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '<d>',
            '</d>',
            '<s>',
            '</s>',
            '</_>',
            '<unk>',
            '<pad>',
            '</n>',
            '我',
            '是',
            'C',
            'P',
            'M',
            'A',
            'n',
            't',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
@tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 56
| 1
|
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 711
|
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
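# Worked example of the vocab rewrite above (illustrative values): BPE
# continuation pieces have their "@@" stripped, while word-final pieces past
# the reserved special symbols (indices 0-13) gain a "</w>" suffix, e.g.
#     {"hel@@": 14, "lo": 15}  ->  {"hel": 14, "lo</w>": 15}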
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase : int = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 343
| 0
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
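# Worked example of the size bookkeeping above: with NUM_SHARDS * NUM_ITEMS_PER_SHARD
# == 12 items split across world_size == 5, ranks receive 3, 3, 2, 2, 2 items,
# since 12 // 5 == 2 and the first 12 % 5 == 2 ranks each take one extra item.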
if __name__ == "__main__":
main()
| 264
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
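# Minimal usage sketch (illustrative; assumes this module is importable as part
# of transformers): the defaults above mirror ImageGPT-small, i.e. a vocabulary
# of 512 clustered pixel values plus one start-of-image token, and
# 32 * 32 == 1024 positions, one per pixel of a 32x32 image.
#     config = ImageGPTConfig()
#     assert config.vocab_size == 513 and config.n_positions == 1024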
| 500
| 0
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
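# Usage sketch (illustrative): each pass bubbles the largest remaining element
# into place, so `length` shrinks by one per recursive call and the early exit
# fires as soon as a full pass performs no swap:
#     bubble_sort([5, 1, 4, 2])  # -> [1, 2, 4, 5]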
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
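# Shape sketch for the dummy past_key_values generated above: with the config
# defaults (n_layer=28, n_head=16, n_embd=4096), each of the 28 layers receives
# a (key, value) pair of zero tensors shaped
#     (batch, 16, seq_len + 2, 4096 // 16)  # head_dim == 256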
| 77
| 1
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
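# Note on the config above: n_guide_steps=2 applies two gradient-guidance steps
# from the value network per denoising step; setting it to 0 (as the inline
# comment says) skips value guidance entirely for faster, unguided sampling.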
| 2
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
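# Channel bookkeeping for the bottleneck above (worked example): with
# out_channels=256 and the default reduction=4, the 1x1 -> 3x3 -> 1x1 stack
# computes in 256 // 4 == 64 channels and only the final 1x1 projects back to
# 256, which is what makes the block cheaper than stacking full 3x3 convolutions.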
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 686
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case__ = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0:
snake_case__ = ds['''train'''].train_test_split(data_args.train_val_split )
snake_case__ = split['''train''']
snake_case__ = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case__ = ViTMAEConfig.from_pretrained(model_args.config_name , **__lowerCAmelCase )
elif model_args.model_name_or_path:
snake_case__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__lowerCAmelCase )
else:
snake_case__ = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
snake_case__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__lowerCAmelCase )
elif model_args.model_name_or_path:
snake_case__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowerCAmelCase )
else:
snake_case__ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
snake_case__ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
snake_case__ = ViTMAEForPreTraining(__lowerCAmelCase )
if training_args.do_train:
snake_case__ = ds['''train'''].column_names
else:
snake_case__ = ds['''validation'''].column_names
if data_args.image_column_name is not None:
snake_case__ = data_args.image_column_name
elif "image" in column_names:
snake_case__ = '''image'''
elif "img" in column_names:
snake_case__ = '''img'''
else:
snake_case__ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
snake_case__ = image_processor.size['''shortest_edge''']
else:
snake_case__ = (image_processor.size['''height'''], image_processor.size['''width'''])
snake_case__ = Compose(
[
Lambda(lambda __lowerCAmelCase : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__lowerCAmelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__lowerCAmelCase ):
snake_case__ = [transforms(__lowerCAmelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
snake_case__ = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__lowerCAmelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
snake_case__ = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__lowerCAmelCase )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
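        # Worked example of the linear scaling rule above: with per-device batch
        # size 32, gradient accumulation 2 and 4 processes, total_train_batch_size
        # is 32 * 2 * 4 == 256, so absolute_lr == base_lr * 256 / 256 == base_lr.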
# Initialize our trainer
snake_case__ = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
snake_case__ = None
if training_args.resume_from_checkpoint is not None:
snake_case__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case__ = last_checkpoint
snake_case__ = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case__ = trainer.evaluate()
trainer.log_metrics('''eval''' , __lowerCAmelCase )
trainer.save_metrics('''eval''' , __lowerCAmelCase )
# Write model card and (optionally) push to hub
snake_case__ = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 208
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
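# Minimal usage sketch (illustrative; assumes Pillow is installed): with the
# defaults above, an input image is resized so its short side is 224,
# center-cropped to 224x224, rescaled by 1/255 and normalized with the OpenAI
# CLIP statistics.
#     processor = CLIPImageProcessor()
#     batch = processor(images=PIL.Image.new("RGB", (640, 480)), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)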
| 208
| 1
|
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
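    # Note on the branches above and below: the KDPM2 (Karras DPM) solver
    # accumulates tiny floating-point differences between CPU/MPS and CUDA
    # kernels, so each device family is checked against its own reference
    # sum and mean.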
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
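# The three loops above all exercise the same diffusers scheduler contract. A minimal,
# self-contained sketch of that contract (EulerDiscreteScheduler is an assumption here;
# any scheduler exposing init_noise_sigma / scale_model_input / step fits):
#
#     import torch
#     from diffusers import EulerDiscreteScheduler
#
#     scheduler = EulerDiscreteScheduler()
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = torch.zeros_like(model_input)  # stand-in for the denoising model
#         sample = scheduler.step(noise_pred, t, sample).prev_sample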
| 52
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # flag name assumed; original attribute name was lost
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 298
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
'''simple docstring'''
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 670
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670
| 1
|
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
"""simple docstring"""
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5, ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state_2, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason='Model has no tokens embeddings')
    def test_resize_tokens_embeddings(self):
pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask')
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
])
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 128
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )  # use_linear_projection value assumed; the original literal was lost
return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images
        expected_height_width = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 128
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
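# Sketch of the mechanism `_LazyModule` provides (a simplified stand-in, not the real
# implementation): attribute access triggers the submodule import, so `import package`
# stays cheap until e.g. `package.VisionTextDualEncoderModel` is first touched.
#
#     import importlib
#     import types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module("." + submodule, self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)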
| 709
|
'''simple docstring'''
import numpy as np
import datasets
UpperCAmelCase_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
UpperCAmelCase_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
UpperCAmelCase_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ),
} ) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 490
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "cvt"

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 179
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCamelCase : List[str] =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCamelCase : str =nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCamelCase : List[str] =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCamelCase : Tuple =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCamelCase : Optional[Any] =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCamelCase : int =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCamelCase : Dict =True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
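# Illustration of the q/k/v split performed above (toy tensors, not a real checkpoint):
# the fused `in_proj_weight` of shape (3 * embed_dim, hidden) is sliced into equal
# query/key/value thirds before being copied into the new model.
#
#     import torch
#
#     embed_dim = 4
#     in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
#     q_w = in_proj_weight[:embed_dim, :]
#     k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
#     v_w = in_proj_weight[2 * embed_dim :, :]
#     assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)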
| 179
| 1
|
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube."""
    root = round(n ** (1 / 3))  # round: float cube roots drift, e.g. 27 ** (1/3) == 3.0000000000000004
    return root * root * root == n
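# Why the rounding above matters (illustrative check): float cube roots drift, so the
# naive test `(n ** (1 / 3)) ** 3 == n` fails even for exact cubes.
assert 27 ** (1 / 3) != 3.0  # evaluates to 3.0000000000000004
assert perfect_cube(27) and not perfect_cube(4)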
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 707
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort `a[start:end + 1]` in place; return the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]  # move the random pivot to the end
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """Lomuto partition around a random pivot; return (pivot_index, comparison_count)."""
    count = 0
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]  # move the random pivot to the end
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
| 315
| 0
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random integer tensor of the given shape."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)  # dtype assumed int32; the original literal was mangled
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    """simple docstring"""

    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase: Union[str, Any] = self._get_input_ids_and_config()
_UpperCamelCase: Dict = max_length
_UpperCamelCase: Any = 2
_UpperCamelCase: Any = 1
_UpperCamelCase: Tuple = 8
_UpperCamelCase: Optional[Any] = 9
for model_class in self.all_generative_model_classes:
_UpperCamelCase: List[Any] = model_class(_snake_case )
_UpperCamelCase: List[Any] = model.generate(_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , _snake_case )
_UpperCamelCase: Optional[int] = jit(model.generate )
_UpperCamelCase: Optional[Any] = jit_generate(_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase: int = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase: Union[str, Any] = attention_mask.at[(0, 0)].set(0 )
_UpperCamelCase: List[Any] = False
_UpperCamelCase: List[Any] = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase: Optional[int] = model_class(_snake_case )
_UpperCamelCase: List[Any] = model.generate(_snake_case , attention_mask=_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , _snake_case )
_UpperCamelCase: int = jit(model.generate )
_UpperCamelCase: Tuple = jit_generate(_snake_case , attention_mask=_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase: Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase: Dict = attention_mask.at[(0, 0)].set(0 )
_UpperCamelCase: Union[str, Any] = True
_UpperCamelCase: Dict = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase: Tuple = model_class(_snake_case )
_UpperCamelCase: int = model.generate(_snake_case , attention_mask=_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , _snake_case )
_UpperCamelCase: Any = jit(model.generate )
_UpperCamelCase: List[Any] = jit_generate(_snake_case , attention_mask=_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCamelCase: Dict = attention_mask.at[(0, 0)].set(0 )
_UpperCamelCase: List[Any] = 2
_UpperCamelCase: Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_UpperCamelCase: List[Any] = model_class(_snake_case )
_UpperCamelCase: int = model.generate(_snake_case , attention_mask=_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , _snake_case )
_UpperCamelCase: int = jit(model.generate )
_UpperCamelCase: int = jit_generate(_snake_case , attention_mask=_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCamelCase: List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
_UpperCamelCase: Tuple = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_UpperCamelCase: Optional[int] = '''Hello world'''
_UpperCamelCase: str = tokenizer(_snake_case , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_snake_case , '''do_samples''' ):
model.generate(_snake_case , do_samples=_snake_case )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_snake_case , '''foo''' ):
_UpperCamelCase: Any = {'''foo''': '''bar'''}
model.generate(_snake_case , **_snake_case )
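# --- Illustrative snippet (not part of the test class above) ---
# A minimal, self-contained sketch of the eager-vs-jitted equivalence check the
# tests repeat: generate once eagerly and once under jax.jit, then compare the
# token ids. The checkpoint name and the generation_config attribute access are
# assumptions (any small Flax causal LM and a recent transformers release work).
import jax.numpy as jnp
from jax import jit
from transformers import FlaxGPT2LMHeadModel

model = FlaxGPT2LMHeadModel.from_pretrained("distilgpt2")
model.generation_config.max_length = 10   # fixed output length, greedy decoding
model.generation_config.do_sample = False
input_ids = jnp.array([[464, 3290]], dtype="i4")
eager_ids = model.generate(input_ids).sequences
jitted_ids = jit(model.generate)(input_ids).sequences
assert eager_ids.tolist() == jitted_ids.tolist()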
| 271
|
"""simple docstring"""
import operator
def A_ (__a , __a = False , __a = None ):
'''simple docstring'''
A_ = operator.lt if reverse else operator.gt
A_ = solution or []
if not arr:
return solution
A_ = [arr.pop(0 )]
for i, item in enumerate(__a ):
if _operator(__a , sublist[-1] ):
sublist.append(__a )
arr.pop(__a )
# merging sublist into solution list
if not solution:
solution.extend(__a )
else:
while sublist:
A_ = sublist.pop(0 )
for i, xx in enumerate(__a ):
if not _operator(__a , __a ):
solution.insert(__a , __a )
break
else:
solution.append(__a )
strand_sort(__a , __a , __a )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
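# --- Illustrative snippet (not part of the file above) ---
# One strand-extraction pass in isolation, on toy values: starting from the head
# element, an already-ordered subsequence (a "strand") is peeled off and the
# leftovers are handled by later recursive passes.
demo = [4, 3, 5, 1, 2]
strand = [demo.pop(0)]            # strand starts with the head element: [4]
for item in list(demo):
    if item > strand[-1]:         # operator.gt, i.e. the reverse=False case
        strand.append(item)
        demo.remove(item)
print(strand, demo)               # [4, 5] [3, 1, 2]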
| 115
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : List[str] = logging.get_logger(__name__)
def __a ( __UpperCAmelCase : str ) -> str:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
lowerCamelCase_ : Optional[Any] = MaskFormerConfig(backbone_config=__UpperCAmelCase )
lowerCamelCase_ : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
lowerCamelCase_ : Union[str, Any] = 847
lowerCamelCase_ : Union[str, Any] = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
lowerCamelCase_ : List[str] = 150
lowerCamelCase_ : Dict = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
lowerCamelCase_ : Optional[int] = 171
lowerCamelCase_ : Optional[Any] = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
lowerCamelCase_ : Tuple = 133
lowerCamelCase_ : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
lowerCamelCase_ : Union[str, Any] = 19
lowerCamelCase_ : List[str] = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
lowerCamelCase_ : Any = 65
lowerCamelCase_ : Optional[int] = "mapillary-vistas-id2label.json"
lowerCamelCase_ : Optional[Any] = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ : Dict = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
return config
def __a ( __UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[str] = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") )
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def __a ( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = dct.pop(__UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = val
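# --- Illustrative snippet (not part of the conversion script) ---
# What one application of the rename helper above amounts to: pop the tensor
# from its original key and reinsert it under the corresponding HF name. The
# tensor value is a toy stand-in.
import torch
demo_sd = {"backbone.patch_embed.norm.bias": torch.zeros(3)}
demo_val = demo_sd.pop("backbone.patch_embed.norm.bias")
demo_sd["model.pixel_level_module.encoder.model.embeddings.norm.bias"] = demo_val
assert list(demo_sd) == ["model.pixel_level_module.encoder.model.embeddings.norm.bias"]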
def __a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCamelCase_ : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase_ : List[str] = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
lowerCamelCase_ : Optional[Any] = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ : List[Any] = in_proj_weight[:dim, :]
            lowerCamelCase_ : List[str] = in_proj_bias[:dim]
            lowerCamelCase_ : List[Any] = in_proj_weight[dim : dim * 2, :]
            lowerCamelCase_ : Union[str, Any] = in_proj_bias[dim : dim * 2]
            lowerCamelCase_ : Optional[Any] = in_proj_weight[-dim:, :]
            lowerCamelCase_ : Optional[int] = in_proj_bias[-dim:]
# fmt: on
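# --- Illustrative snippet (not part of the conversion script) ---
# The fused-QKV split performed above, on a toy matrix: the original checkpoint
# stores one (3*dim, dim) projection that is cut into equal query/key/value
# thirds before being written to separate HF keys.
import torch
dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)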
def __a ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase_ : List[str] = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
lowerCamelCase_ : str = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
        lowerCamelCase_ : Optional[Any] = in_proj_weight[:hidden_size, :]
        lowerCamelCase_ : Union[str, Any] = in_proj_bias[:hidden_size]
        lowerCamelCase_ : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
        lowerCamelCase_ : str = in_proj_bias[hidden_size : hidden_size * 2]
        lowerCamelCase_ : Optional[Any] = in_proj_weight[-hidden_size:, :]
        lowerCamelCase_ : Optional[int] = in_proj_bias[-hidden_size:]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase_ : int = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
lowerCamelCase_ : Optional[int] = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
        lowerCamelCase_ : Dict = in_proj_weight[:hidden_size, :]
        lowerCamelCase_ : Tuple = in_proj_bias[:hidden_size]
        lowerCamelCase_ : Union[str, Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
        lowerCamelCase_ : List[Any] = in_proj_bias[hidden_size : hidden_size * 2]
        lowerCamelCase_ : List[Any] = in_proj_weight[-hidden_size:, :]
        lowerCamelCase_ : Tuple = in_proj_bias[-hidden_size:]
def __a ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase_ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ : List[str] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __a ( __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : bool = False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Any = get_maskformer_config(__UpperCAmelCase )
# load original state_dict
with open(__UpperCAmelCase , "rb" ) as f:
lowerCamelCase_ : str = pickle.load(__UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowerCamelCase_ : Union[str, Any] = create_rename_keys(__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_swin_q_k_v(__UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(__UpperCAmelCase , __UpperCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
lowerCamelCase_ : Dict = torch.from_numpy(__UpperCAmelCase )
# load 🤗 model
lowerCamelCase_ : Optional[Any] = MaskFormerForInstanceSegmentation(__UpperCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(__UpperCAmelCase , param.shape )
lowerCamelCase_ : List[Any] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__UpperCAmelCase ) == 0, f"Unexpected keys: {unexpected_keys}"
# verify results
lowerCamelCase_ : Dict = prepare_img()
if "vistas" in model_name:
lowerCamelCase_ : Any = 65
elif "cityscapes" in model_name:
lowerCamelCase_ : List[Any] = 65535
else:
lowerCamelCase_ : Union[str, Any] = 255
lowerCamelCase_ : Optional[int] = True if "ade" in model_name else False
lowerCamelCase_ : Union[str, Any] = MaskFormerImageProcessor(ignore_index=__UpperCAmelCase , reduce_labels=__UpperCAmelCase )
lowerCamelCase_ : Optional[int] = image_processor(__UpperCAmelCase , return_tensors="pt" )
lowerCamelCase_ : List[Any] = model(**__UpperCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
lowerCamelCase_ : List[str] = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
image_processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(f"nielsr/{model_name}" )
image_processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
snake_case_ : Any = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
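# --- Illustrative snippet (not part of the conversion script) ---
# A minimal sketch of using the converted checkpoint afterwards. The folder
# path is a placeholder for whatever --pytorch_dump_folder_path pointed at, and
# the post-processing call assumes a recent transformers release.
import requests
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

ckpt_dir = "./maskformer-swin-tiny-ade"  # placeholder path
processor = MaskFormerImageProcessor.from_pretrained(ckpt_dir)
model = MaskFormerForInstanceSegmentation.from_pretrained(ckpt_dir)
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
seg_map = processor.post_process_semantic_segmentation(outputs)[0]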
| 703
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
'''simple docstring'''
def __init__( self : Any , __magic_name__ : Union[str, Any] , __magic_name__ : List[str]=12 , __magic_name__ : int=7 , __magic_name__ : str=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=99 , __magic_name__ : str=32 , __magic_name__ : Optional[Any]=32 , __magic_name__ : int=2 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : int=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Any=512 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : str=0 , __magic_name__ : Dict=None , ) -> Optional[Any]:
lowerCamelCase_ : List[str] = parent
lowerCamelCase_ : Union[str, Any] = batch_size
lowerCamelCase_ : int = seq_length
lowerCamelCase_ : Optional[int] = is_training
lowerCamelCase_ : str = use_input_mask
lowerCamelCase_ : str = use_labels
lowerCamelCase_ : Optional[int] = vocab_size
lowerCamelCase_ : Optional[Any] = hidden_size
lowerCamelCase_ : str = projection_dim
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : str = num_attention_heads
lowerCamelCase_ : Any = intermediate_size
lowerCamelCase_ : Optional[int] = dropout
lowerCamelCase_ : str = attention_dropout
lowerCamelCase_ : List[Any] = max_position_embeddings
lowerCamelCase_ : Dict = initializer_range
lowerCamelCase_ : Optional[Any] = scope
lowerCamelCase_ : List[str] = bos_token_id
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : int = None
if self.use_input_mask:
lowerCamelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase_ : List[Any] = input_mask.numpy()
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = input_mask.shape
lowerCamelCase_ : int = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__magic_name__ ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 0
lowerCamelCase_ : str = self.get_config()
return config, input_ids, tf.convert_to_tensor(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Any:
lowerCamelCase_ : Union[str, Any] = TFBlipTextModel(config=__magic_name__ )
lowerCamelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , training=__magic_name__ )
lowerCamelCase_ : Dict = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
lowerCamelCase_ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : int = config_and_inputs
lowerCamelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TFBlipTextModel,) if is_tf_available() else ()
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
lowerCamelCase_ : List[str] = BlipTextModelTester(self )
lowerCamelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : List[Any] = TFBlipTextModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Dict=True ) -> Union[str, Any]:
super().test_pt_tf_model_equivalence(allow_missing_keys=__magic_name__ )
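# --- Illustrative snippet (not part of the test file above) ---
# The attention-mask construction used in prepare_config_and_inputs, shown on
# toy values: each row keeps 1s up to a random cut-off index and 0s after it,
# so the model sees right-padded sequences. The start indices are stand-ins
# for np.random.randint(1, seq_length - 1, size=(batch_size,)).
import numpy as np
batch_size, seq_length = 2, 7
mask = np.ones((batch_size, seq_length), dtype=np.int64)
start_indices = np.array([3, 5])
for batch_idx, start_index in enumerate(start_indices):
    mask[batch_idx, :start_index] = 1
    mask[batch_idx, start_index:] = 0
print(mask)  # [[1 1 1 0 0 0 0], [1 1 1 1 1 0 0]]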
| 253
| 0
|
"""simple docstring"""
import random
class Onepad:
    '''simple docstring'''
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - key[i] ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
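# --- Illustrative snippet (not part of the file above) ---
# Why decrypt inverts encrypt, checked on toy values: c = (p + k) * k expands
# to p*k + k**2, so (c - k**2) / k = p recovers the code point for any k >= 1.
demo_p, demo_k = ord("H"), 57
demo_c = (demo_p + demo_k) * demo_k
assert (demo_c - demo_k**2) // demo_k == demo_p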
| 91
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase_ ( A ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : CLIPTextModel , __lowerCamelCase : CLIPTokenizer , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCamelCase : StableDiffusionSafetyChecker , __lowerCamelCase : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , )
def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCamelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.enable_attention_slicing(__lowerCamelCase )
@torch.no_grad()
def __call__( self : List[str] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_1_2 , __lowerCamelCase : int = 5_1_2 , __lowerCamelCase : int = 5_0 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[torch.FloatTensor] = None , **__lowerCamelCase : Tuple , ):
"""simple docstring"""
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE = 1
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCamelCase , __lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__lowerCamelCase )}.""" )
# get prompt text embeddings
_SCREAMING_SNAKE_CASE = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
_SCREAMING_SNAKE_CASE = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = text_embeddings.shape
_SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , __lowerCamelCase , 1 )
_SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , __lowerCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE = 42
if negative_prompt is None:
_SCREAMING_SNAKE_CASE = [""]
elif type(__lowerCamelCase ) is not type(__lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCamelCase )} !="""
F""" {type(__lowerCamelCase )}.""" )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE = [negative_prompt]
elif batch_size != len(__lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(__lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
_SCREAMING_SNAKE_CASE = negative_prompt
_SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
_SCREAMING_SNAKE_CASE = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" , )
_SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
_SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(__lowerCamelCase , __lowerCamelCase , 1 )
_SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        _SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)  # latents_shape
        _SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)  # latents_shape_reference (fixed 64x64 base)
_SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_SCREAMING_SNAKE_CASE = torch.randn(
__lowerCamelCase , generator=__lowerCamelCase , device="cpu" , dtype=__lowerCamelCase ).to(self.device )
_SCREAMING_SNAKE_CASE = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device="cpu" , dtype=__lowerCamelCase ).to(
self.device )
else:
_SCREAMING_SNAKE_CASE = torch.randn(
__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_SCREAMING_SNAKE_CASE = latents_reference.to(self.device )
_SCREAMING_SNAKE_CASE = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_SCREAMING_SNAKE_CASE = (latents_shape[3] - latents_shape_reference[3]) // 2
_SCREAMING_SNAKE_CASE = (latents_shape[2] - latents_shape_reference[2]) // 2
_SCREAMING_SNAKE_CASE = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_SCREAMING_SNAKE_CASE = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_SCREAMING_SNAKE_CASE = 0 if dx < 0 else dx
_SCREAMING_SNAKE_CASE = 0 if dy < 0 else dy
_SCREAMING_SNAKE_CASE = max(-dx , 0 )
_SCREAMING_SNAKE_CASE = max(-dy , 0 )
_SCREAMING_SNAKE_CASE = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_SCREAMING_SNAKE_CASE = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_SCREAMING_SNAKE_CASE = {}
if accepts_eta:
_SCREAMING_SNAKE_CASE = eta
for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
_SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
# predict the noise residual
_SCREAMING_SNAKE_CASE = self.unet(__lowerCamelCase , __lowerCamelCase , encoder_hidden_states=__lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
_SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = 1 / 0.1_8_2_1_5 * latents
_SCREAMING_SNAKE_CASE = self.vae.decode(__lowerCamelCase ).sample
_SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_SCREAMING_SNAKE_CASE = self.feature_extractor(self.numpy_to_pil(__lowerCamelCase ) , return_tensors="pt" ).to(
self.device )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.safety_checker(
images=__lowerCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_SCREAMING_SNAKE_CASE = None
if output_type == "pil":
_SCREAMING_SNAKE_CASE = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__lowerCamelCase , nsfw_content_detected=__lowerCamelCase )
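# --- Illustrative snippet (not part of the pipeline above) ---
# The classifier-free-guidance update from the denoising loop, on toy tensors:
# the two halves of the doubled batch are split back apart and the prediction
# is pushed away from the unconditional branch by guidance_scale. Shapes and
# the scale value are arbitrary stand-ins.
import torch
noise_pred = torch.randn(2, 4, 8, 8)  # [uncond, text] stacked along dim 0
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 8, 8)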
| 418
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
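# --- Illustrative snippet (not part of the module above) ---
# What the _LazyModule indirection buys: importing the package is essentially
# free, and the sentencepiece-backed tokenizer module is only loaded on first
# attribute access (this assumes sentencepiece is installed).
import transformers.models.bartpho as bartpho
tokenizer_cls = bartpho.BartphoTokenizer  # the real import happens here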
| 64
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__: Optional[datasets.Features] = None
UpperCAmelCase__: str = "utf-8"
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: bool = True # deprecated
UpperCAmelCase__: Optional[int] = None # deprecated
UpperCAmelCase__: int = 10 << 20 # 10MB
UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__: List[str] = JsonConfig
def __A ( self ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
A__ : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A__ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
A__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
A__ : Optional[Any] = data_files
if isinstance(A__ , A__ ):
A__ : List[str] = [files]
A__ : int = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
A__ : Optional[int] = [files]
A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , A__ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
return pa_table
def __A ( self , A__ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
# We keep only the field we are interested in
A__ : Optional[int] = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(A__ , (list, tuple) ):
A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
else:
A__ : Any = dataset
A__ : Any = pa.Table.from_pydict(A__ )
yield file_idx, self._cast_table(A__ )
# If the file has one json object per line
else:
with open(A__ , """rb""" ) as f:
A__ : List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
A__ : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
A__ : Dict = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(A__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
try:
while True:
try:
A__ : str = paj.read_json(
io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(A__ , pa.ArrowInvalid )
and "straddling" not in str(A__ )
or block_size > len(A__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
try:
A__ : str = set().union(*[row.keys() for row in dataset] )
A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
A__ : int = pa.Table.from_pydict(A__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(A__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
batch_idx += 1
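# --- Illustrative snippet (not part of the builder above) ---
# The block-size retry from the loader, reproduced on toy bytes: a JSON object
# larger than the Arrow block_size typically fails to parse (the "straddling"
# case caught above), and retrying with a doubled block size succeeds. The
# sizes here are arbitrary stand-ins.
import io
import pyarrow as pa
import pyarrow.json as paj

line = b'{"text": "' + b"x" * 4096 + b'"}\n'
try:
    paj.read_json(io.BytesIO(line), read_options=paj.ReadOptions(block_size=1024))
except pa.ArrowInvalid:
    table = paj.read_json(io.BytesIO(line), read_options=paj.ReadOptions(block_size=16384))
    assert table.num_rows == 1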
| 64
| 1
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A :
'''simple docstring'''
def __init__( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any=0.2 , __lowerCAmelCase : Any=0.2 ) -> str:
"""simple docstring"""
A__ = bp_numa
A__ = bp_numa
A__ = bp_numa
A__ = conva_get[:2]
A__ = conva_get[2]
A__ = size_pa
A__ = rate_w
A__ = rate_t
A__ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A__ = -2 * np.random.rand(self.conva[1] ) + 1
A__ = -2 * np.random.rand(self.num_bpa ) + 1
A__ = -2 * np.random.rand(self.num_bpa ) + 1
def a_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
A__ = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(__lowerCAmelCase , """wb""" ) as f:
pickle.dump(__lowerCAmelCase , __lowerCAmelCase )
print(f'Model saved: {save_path}' )
@classmethod
def a_ ( cls : List[str] , __lowerCAmelCase : int ) -> Tuple:
"""simple docstring"""
with open(__lowerCAmelCase , """rb""" ) as f:
A__ = pickle.load(__lowerCAmelCase ) # noqa: S301
A__ = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
A__ = model_dic.get("""size_pooling1""" )
A__ = model_dic.get("""num_bp1""" )
A__ = model_dic.get("""num_bp2""" )
A__ = model_dic.get("""num_bp3""" )
A__ = model_dic.get("""rate_weight""" )
A__ = model_dic.get("""rate_thre""" )
# create model instance
A__ = CNN(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# modify model parameter
A__ = model_dic.get("""w_conv1""" )
A__ = model_dic.get("""wkj""" )
A__ = model_dic.get("""vji""" )
A__ = model_dic.get("""thre_conv1""" )
A__ = model_dic.get("""thre_bp2""" )
A__ = model_dic.get("""thre_bp3""" )
return conv_ins
def a_ ( self : Optional[int] , __lowerCAmelCase : int ) -> int:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def a_ ( self : Optional[int] , __lowerCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return round(__lowerCAmelCase , 3 )
def a_ ( self : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : int ) -> List[str]:
"""simple docstring"""
A__ = convs[0]
A__ = convs[1]
A__ = np.shape(__lowerCAmelCase )[0]
# get the data slice of original image data, data_focus
A__ = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCAmelCase ):
A__ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCAmelCase )
        # calculate the feature map of every single kernel, and save it as a list of matrices
A__ = []
A__ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__lowerCAmelCase ):
A__ = []
for i_focus in range(len(__lowerCAmelCase ) ):
A__ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCAmelCase ) )
A__ = np.asmatrix(__lowerCAmelCase ).reshape(
__lowerCAmelCase , __lowerCAmelCase )
data_featuremap.append(__lowerCAmelCase )
        # expand the data slices to one dimension
A__ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCAmelCase ) )
A__ = np.asarray(__lowerCAmelCase )
return focus_list, data_featuremap
def a_ ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]="average_pool" ) -> Dict:
"""simple docstring"""
A__ = len(featuremaps[0] )
A__ = int(size_map / size_pooling )
A__ = []
for i_map in range(len(__lowerCAmelCase ) ):
A__ = featuremaps[i_map]
A__ = []
for i_focus in range(0 , __lowerCAmelCase , __lowerCAmelCase ):
for j_focus in range(0 , __lowerCAmelCase , __lowerCAmelCase ):
A__ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCAmelCase ) )
A__ = np.asmatrix(__lowerCAmelCase ).reshape(__lowerCAmelCase , __lowerCAmelCase )
featuremap_pooled.append(__lowerCAmelCase )
return featuremap_pooled
    def _expand(self, data):
        """Flatten a list of 2-D feature maps into a single 1-D array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        """Flatten a single matrix into a 1 x n row matrix."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Back-propagate the pooled gradients onto each convolutional feature map."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        """Train the network until the MSE drops below error_accuracy or n_repeat epochs pass."""
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_poolinga)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bpa
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bpa
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_poolinga * self.size_poolinga)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_poolinga,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conva[k_conv] = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]))

                    self.thre_conva[k_conv] = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bpa = self.thre_bpa - pd_k_all * self.rate_thre
                self.thre_bpa = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run a forward pass over each test image and round the outputs."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_poolinga)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bpa
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bpa
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Return the convolved and pooled feature maps for a single image."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_poolinga)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
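# A minimal sketch of the flattening that `_expand` performs, shown on two
# hypothetical 2x2 feature maps (an illustration only; the demo data is an
# assumption, not part of the original file):
#
#   maps = [np.asmatrix([[1, 2], [3, 4]]), np.asmatrix([[5, 6], [7, 8]])]
#   flat = []
#   for m in maps:
#       flat.extend(m.reshape(1, m.size).getA().tolist()[0])
#   np.asarray(flat)  # -> array([1, 2, 3, 4, 5, 6, 7, 8])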
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
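# To run just this test module (the file path is an assumption, not part of the
# original file):
#
#   python -m pytest tests/models/clip/test_processor_clip.py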
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom help formatter that hides the generic usage line for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
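# A minimal usage sketch (assumed, not from the original file): prompting for a
# yes/no field with the helpers and converters defined above.
#
#   use_cpu = _ask_field(
#       "Do you want to run on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )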
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings, as used by
    the byte-level BPE. Avoids whitespace/control characters that BPE merges break on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
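# Illustration (not part of the original file): bytes_to_unicode() maps every
# byte value to a printable character; e.g. the space byte 0x20, which is not in
# the printable ranges above, is remapped to "Ġ" (chr(256 + 32)):
#
#   byte_encoder = bytes_to_unicode()
#   assert byte_encoder[ord(" ")] == "Ġ"
#   assert len(byte_encoder) == 256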
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
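# Illustration (not part of the original file): get_pairs returns the adjacent
# symbol pairs that the BPE merge loop below ranks and merges.
#
#   assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}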
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into byte-level BPE tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) back to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Build model inputs by adding <s> and </s> around one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """BART does not use token type ids; the returned mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
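# A minimal usage sketch (assumed, not part of the original file):
#
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   ids = tokenizer("Hello world")["input_ids"]
#   # the ids are wrapped in <s> ... </s> by build_inputs_with_special_tokens above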
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
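# A minimal usage sketch (assumed, not part of the original file): a small BART
# configuration; unspecified fields keep the defaults above, and attribute_map
# routes num_attention_heads to encoder_attention_heads.
#
#   config = BartConfig(d_model=64, encoder_layers=2, decoder_layers=2)
#   assert config.num_attention_heads == 16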
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers + min_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
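# A minimal sketch (assumed, not part of the original file) of pairing the ONNX
# config with a model configuration:
#
#   onnx_config = BartOnnxConfig(BartConfig(), task="default")
#   list(onnx_config.inputs)  # input_ids, attention_mask, decoder_input_ids, ...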
"""Bertology-style analysis on a pre-trained GPT-2: computes head attention entropy
and head importance scores, and optionally masks and prunes low-importance heads."""

import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution (optionally squaring first)."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
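# Sanity check (an assumption, not in the original script): a uniform
# distribution over 4 outcomes has entropy ln(4) ≈ 1.386 nats.
#
#   p = torch.full((4,), 0.25)
#   assert torch.isclose(entropy(p), torch.log(torch.tensor(4.0)))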
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) from least to most important until the score drops below threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove) the masked heads and measure the effect on score and speed."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
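# Example invocation (the script name and data layout are assumptions, not part
# of the original file):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir tokens.txt \
#       --output_dir ./pruned \
#       --try_masking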
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
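# Illustration (an assumption, not part of the original test file): the fixtures
# above pair ADE20k test images with their segmentation maps, e.g.
#
#   image, segmentation_map = prepare_semantic_single_inputs()
#   assert image.size == segmentation_map.size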
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = BeitImageProcessor if is_vision_available() else None
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = BeitImageProcessingTester(self )
@property
def a_ ( self : Dict ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self : Tuple ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''center_crop''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''image_std''' ) )
def a_ ( self : int ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_ )
_UpperCAmelCase : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCAmelCase_ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_ )
def a_ ( self : int ) -> int:
'''simple docstring'''
pass
def a_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : Tuple = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a_ ( self : Dict ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
_UpperCAmelCase : Tuple = []
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
_UpperCAmelCase : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
_UpperCAmelCase : Union[str, Any] = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
_UpperCAmelCase , _UpperCAmelCase : List[str] = prepare_semantic_single_inputs()
_UpperCAmelCase : Tuple = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = prepare_semantic_batch_inputs()
_UpperCAmelCase : Any = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def a_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
_UpperCAmelCase , _UpperCAmelCase : int = prepare_semantic_single_inputs()
_UpperCAmelCase : Tuple = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
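        # Hedged note on the block above: the bare `True` assignment stands in for
        # enabling the processor's label reduction (do_reduce_labels in the original
        # test); with it enabled, background id 0 is remapped to 255 and the other
        # ids shift down by one, which is why the upper bound widens from 150 to 255.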
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Tuple = '▁'
UpperCAmelCase__ : Tuple = {'vocab_file': 'prophetnet.tokenizer'}
UpperCAmelCase__ : Any = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
UpperCAmelCase__ : Tuple = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
UpperCAmelCase__ : List[Any] = {
'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def _A ( _UpperCamelCase ):
    vocab = collections.OrderedDict()
    with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('''\n''' )
        vocab[token] = index
    return vocab
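# A minimal, hedged usage sketch for the loader above: ids follow line order in
# the vocab file (the temp file and tokens below are illustrative only).
def _demo_load_vocab():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as f:
        f.write("[PAD]\n[CLS]\n[SEP]\n")
        path = f.name
    vocab = _A(path)
    assert list(vocab.items()) == [("[PAD]", 0), ("[CLS]", 1), ("[SEP]", 2)]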
class lowerCAmelCase_ ( lowercase_ ):
SCREAMING_SNAKE_CASE_ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : List[str]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Tuple="[UNK]" , UpperCAmelCase_ : int="[PAD]" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : int="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Optional[Any] , ) -> None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
try:
import sentencepiece as spm
except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
raise
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
_UpperCAmelCase : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
_UpperCAmelCase : Tuple = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
_UpperCAmelCase : Optional[int] = F'''[unused{i}]'''
_UpperCAmelCase : Dict = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
_UpperCAmelCase : str = 12
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_ )
def __getstate__( self : Any ) -> int:
'''simple docstring'''
_UpperCAmelCase : Any = self.__dict__.copy()
_UpperCAmelCase : Optional[Any] = None
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = d
try:
import sentencepiece as spm
except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : Union[str, Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_ )) + [1]
return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def a_ ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Dict , UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def a_ ( self : str , UpperCAmelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a_ ( self : List[Any] , UpperCAmelCase_ : Dict ) -> Any:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a_ ( self : List[Any] , UpperCAmelCase_ : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ''''''.join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , ''' ''' ).strip()
return out_string
def a_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase : Tuple = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , '''wb''' ) as fi:
_UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def a_ ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Optional[Any] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
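# Hedged sketch of the id-offset convention encoded above: ids 0-4 are the
# special tokens, 5-14 the ten [unused] slots, and every SentencePiece id > 0
# shifts up by fairseq_offset = 12 (spm id 0, its <unk>, maps to [UNK] = 3).
def _demo_spm_to_model_id(spm_id, fairseq_offset=12, unk_token_id=3):
    return spm_id + fairseq_offset if spm_id else unk_token_id
assert _demo_spm_to_model_id(3) == 15  # "," sits at spm position 3, embedding position 15
assert _demo_spm_to_model_id(0) == 3   # spm <unk> falls back to [UNK]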
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/bigbird-roberta-base""": 40_96,
"""google/bigbird-roberta-large""": 40_96,
"""google/bigbird-base-trivia-itc""": 40_96,
}
lowerCAmelCase_ = """▁"""
class _lowerCAmelCase ( _lowercase ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = BigBirdTokenizer
A__ = ['input_ids', 'attention_mask']
A__ = []
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase="[CLS]" , **__UpperCAmelCase , ):
lowerCAmelCase__ : int = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
lowerCAmelCase__ : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
lowerCAmelCase__ : Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
lowerCAmelCase__ : Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
lowerCAmelCase__ : Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
lowerCAmelCase__ : int = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase__ : List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ : Optional[int] = vocab_file
lowerCAmelCase__ : str = False if not self.vocab_file else True
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
lowerCAmelCase__ : Dict = [self.sep_token_id]
lowerCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
lowerCAmelCase__ : int = [self.sep_token_id]
lowerCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : Optional[int] = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
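# Hedged illustration of the sentence-pair layout built by the methods above
# (ids are placeholders, not the real BigBird vocabulary): [CLS] A [SEP] B [SEP],
# with token_type_ids 0 over [CLS] + A + the first [SEP] and 1 over B + the
# second [SEP].
_cls, _sep = [65], [66]
_seq_a, _seq_b = [7, 8, 9], [10, 11]
_input_ids = _cls + _seq_a + _sep + _seq_b + _sep
_token_type_ids = len(_cls + _seq_a + _sep) * [0] + len(_seq_b + _sep) * [1]
assert _input_ids == [65, 7, 8, 9, 66, 10, 11, 66]
assert _token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]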
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch
    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
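# Hedged sketch of the protocol the tests above exercise: a stopping criterion
# is a callable over (input_ids, scores) that reports whether generation should
# stop once the sequence dimension reaches the configured bound.
if is_torch_available():
    _ids = torch.zeros((1, 10), dtype=torch.long)
    _scores = torch.zeros((1, 250))
    assert MaxLengthCriteria(max_length=10)(_ids, _scores)
    assert not MaxLengthCriteria(max_length=11)(_ids, _scores)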
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # (sic: "worflow_run_id" matches the helper's own parameter name)
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the file contents of the artifacts downloaded from the last daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
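# Hedged usage sketch (artifact name, directory, and env var are placeholders):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["prev_ci_results"],
#       output_dir="prev_ci_artifacts",
#       token=os.environ["GITHUB_TOKEN"],
#   )
#   # reports maps artifact name -> {zip member filename -> decoded text}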
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
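# Hedged note on the pattern above: the module is replaced by a _LazyModule shell
# built from _import_structure, so e.g.
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig
# performs the heavy import only on first attribute access, and the torch/tf/flax
# variants are registered only when the matching backend is installed.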
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__UpperCamelCase : List[str] = get_tests_dir("""fixtures""")
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = mock.Mock()
__lowercase = 500
__lowercase = {}
__lowercase = HTTPError
__lowercase = {}
# Download this model to make sure it's in the cache.
__lowercase = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=_lowerCAmelCase ) as mock_head:
__lowercase = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def _a ( cls : int ) -> int:
"""simple docstring"""
__lowercase = TOKEN
HfFolder.save_token(_lowerCAmelCase )
@classmethod
def _a ( cls : Any ) -> Any:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCAmelCase , repo_id="""test-feature-extractor""" , push_to_hub=_lowerCAmelCase , use_auth_token=self._token )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
__lowercase = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCAmelCase , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=_lowerCAmelCase , use_auth_token=self._token )
__lowercase = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
__lowercase = CustomFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
__lowercase = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=_lowerCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
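# Hedged recap of the round-trip the staging tests above verify (repo names are
# the tests' own placeholders; `token` comes from HfFolder):
#
#   extractor = WavaVecaFeatureExtractor.from_pretrained(<fixtures dir>)
#   extractor.push_to_hub("test-feature-extractor", use_auth_token=token)
#   reloaded = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
#   # every attribute in extractor.__dict__ matches on the reloaded copy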
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def SCREAMING_SNAKE_CASE ( a_ : Tuple , a_ : Dict ):
__a = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
__a = DatasetInfosDict.from_directory(a_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def SCREAMING_SNAKE_CASE ( a_ : Optional[int] , a_ : DatasetInfo ):
__a = str(a_ )
dataset_info.write_to_directory(a_ )
__a = DatasetInfo.from_directory(a_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(a_ , 'dataset_info.json' ) )
def SCREAMING_SNAKE_CASE ( ):
__a = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
__a = dataset_info._to_yaml_dict()
assert sorted(a_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
__a = yaml.safe_dump(a_ )
__a = yaml.safe_load(a_ )
assert dataset_info_yaml_dict == reloaded
def SCREAMING_SNAKE_CASE ( ):
__a = DatasetInfo()
__a = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def SCREAMING_SNAKE_CASE ( a_ : List[str] , a_ : DatasetInfosDict ):
__a = str(a_ )
dataset_infos_dict.write_to_directory(a_ )
__a = DatasetInfosDict.from_directory(a_ )
    # the config_name of the dataset_infos_dict takes precedence over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
__a = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
__a = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(a_ , 'README.md' ) )
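# Hedged sketch of the YAML round-trip the tests above rely on: only fields in
# DatasetInfo._INCLUDED_INFO_IN_YAML survive _to_yaml_dict, and _from_yaml_dict
# rebuilds a DatasetInfo from them (field values here are illustrative).
def _demo_yaml_roundtrip():
    info = DatasetInfo(features=Features({'a': Value('int32')}), dataset_size=42)
    restored = DatasetInfo._from_yaml_dict(info._to_yaml_dict())
    assert restored.dataset_size == 42
    assert restored.features == info.features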
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
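# Hedged restatement of the classifier-free-guidance step used in the denoising
# loop above: the doubled batch carries (unconditional, text-conditional) noise
# estimates, blended with the guidance weight.
def _demo_cfg_step(noise_pred: torch.Tensor, guidance_scale: float = 7.5) -> torch.Tensor:
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)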
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    >>> resistor_parallel([4.0, 4.0])
    2.0
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn

    >>> resistor_series([4.0, 4.0])
    8.0
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
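# Worked example for the helpers above: two 2 Ω resistors combine to 1 Ω in
# parallel (1 / (1/2 + 1/2)) and 4 Ω in series.
assert resistor_parallel([2.0, 2.0]) == 1.0
assert resistor_series([2.0, 2.0]) == 4.0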
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_()-> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
_SCREAMING_SNAKE_CASE : str = Dataset.from_dict(__SCREAMING_SNAKE_CASE )
return dataset
class _snake_case ( __snake_case ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataset()
_SCREAMING_SNAKE_CASE : Optional[Any] = make_duplicate_clusters(_A , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = get_dataset()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = deduplicate_dataset(_A)
self.assertEqual(len(_A) , 2)
print(_A)
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2)
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , _A)
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
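# Quick sanity check for solution(): with n = 3 the sum has a single term,
# 2 * 3 * ((3 - 1) // 2) = 6.
assert solution(3) == 6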
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCamelCase : str ={
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] =["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Union[str, Any] =[
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Dict =_LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase_ ( A_ , A_ ):
__lowerCamelCase = old_name
if "patch_embed" in old_name:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = old_name.split('''.''' )
if layer == "0":
__lowerCamelCase = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__lowerCamelCase = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__lowerCamelCase = old_name.replace('''3''' , '''convolution2''' )
else:
__lowerCamelCase = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , A_ ):
__lowerCamelCase = R'''\b\d{2}\b'''
if bool(re.search(A_ , A_ ) ):
__lowerCamelCase = re.search(R'''\d\.\d\d.''' , A_ ).group()
else:
__lowerCamelCase = re.search(R'''\d\.\d.''' , A_ ).group()
if int(match[0] ) < 6:
__lowerCamelCase = old_name.replace(A_ , '''''' )
__lowerCamelCase = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__lowerCamelCase = '''intermediate_stages.''' + trimmed_name
else:
__lowerCamelCase = old_name.replace(A_ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__lowerCamelCase = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__lowerCamelCase = str(int(match[2] ) - num_meta4D_last_stage )
__lowerCamelCase = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__lowerCamelCase = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__lowerCamelCase = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__lowerCamelCase = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__lowerCamelCase = trimmed_name.replace('''fc2''' , '''linear_out''' )
__lowerCamelCase = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , A_ ):
__lowerCamelCase = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__lowerCamelCase = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__lowerCamelCase = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__lowerCamelCase = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__lowerCamelCase = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__lowerCamelCase = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__lowerCamelCase = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__lowerCamelCase = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__lowerCamelCase = new_name.replace('''norm''' , '''layernorm''' )
__lowerCamelCase = '''efficientformer.''' + new_name
else:
__lowerCamelCase = '''efficientformer.encoder.''' + new_name
return new_name
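# Hedged examples of the mapping implemented above (key names are illustrative):
# a timm-style "patch_embed.0.weight" becomes
# "efficientformer.patch_embed.convolution1.weight", and "network.<i>..." keys
# are routed into "intermediate_stages..." meta4D blocks or the final
# "last_stage..." meta3D blocks, with norm/fc/proj renamed to their
# batchnorm/layernorm/linear/projection counterparts.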
def lowerCamelCase_ ( A_ , A_ ):
for key in checkpoint.copy().keys():
__lowerCamelCase = checkpoint.pop(A_ )
__lowerCamelCase = val
return checkpoint
def lowerCamelCase_ ( ):
__lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCamelCase = Image.open(requests.get(A_ , stream=A_ ).raw )
return image
def lowerCamelCase_ ( A_ , A_ , A_ , A_ ):
__lowerCamelCase = torch.load(A_ , map_location='''cpu''' )['''model''']
__lowerCamelCase = EfficientFormerConfig.from_json_file(A_ )
__lowerCamelCase = EfficientFormerForImageClassificationWithTeacher(A_ )
__lowerCamelCase = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__lowerCamelCase = config.depths[-1] - config.num_metaad_blocks + 1
__lowerCamelCase = convert_torch_checkpoint(A_ , A_ )
model.load_state_dict(A_ )
model.eval()
__lowerCamelCase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__lowerCamelCase = prepare_img()
__lowerCamelCase = 2_56
__lowerCamelCase = 2_24
__lowerCamelCase = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__lowerCamelCase = processor(images=A_ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__lowerCamelCase = Compose(
[
Resize(A_ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(A_ ),
ToTensor(),
Normalize(A_ , A_ ),
] )
__lowerCamelCase = image_transforms(A_ ).unsqueeze(0 )
assert torch.allclose(A_ , A_ )
__lowerCamelCase = model(A_ )
__lowerCamelCase = outputs.logits
__lowerCamelCase = (1, 10_00)
if "l1" in model_name:
__lowerCamelCase = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , A_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__lowerCamelCase = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , A_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__lowerCamelCase = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(A_ ).mkdir(exist_ok=A_ )
model.save_pretrained(A_ )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(A_ )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=A_ , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=A_ , )
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
_UpperCamelCase : Tuple =parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ) ->str:
__lowercase = os.path.abspath(__magic_name__ )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
__lowercase = tf.train.list_variables(__magic_name__ )
__lowercase = []
__lowercase = []
__lowercase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__lowercase = full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
__lowercase = name[1:]
# figure out how many levels deep the name is
__lowercase = 0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(__magic_name__ )
# read data
__lowercase = tf.train.load_variable(__magic_name__ , __magic_name__ )
names.append("/".join(__magic_name__ ) )
arrays.append(__magic_name__ )
logger.info(F'''Read a total of {len(__magic_name__ ):,} layers''' )
# Sanity check
if len(set(__magic_name__ ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(__magic_name__ ) )})''' )
__lowercase = list(set(__magic_name__ ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(__magic_name__ , __magic_name__ ):
__lowercase = full_name.split("/" )
__lowercase = model
__lowercase = []
for i, m_name in enumerate(__magic_name__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
__lowercase = int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
__lowercase = getattr(__magic_name__ , "embeddings" )
__lowercase = getattr(__magic_name__ , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
__lowercase = getattr(__magic_name__ , "encoder" )
__lowercase = getattr(__magic_name__ , "layer" )
__lowercase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
__lowercase = getattr(__magic_name__ , "pooler" )
__lowercase = getattr(__magic_name__ , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
__lowercase = getattr(__magic_name__ , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
__lowercase = getattr(__magic_name__ , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
__lowercase = getattr(__magic_name__ , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
__lowercase = getattr(__magic_name__ , "token_type_embeddings" )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append("weight" )
__lowercase = getattr(__magic_name__ , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
__lowercase = getattr(__magic_name__ , "attention" )
__lowercase = getattr(__magic_name__ , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
__lowercase = getattr(__magic_name__ , "attention" )
__lowercase = getattr(__magic_name__ , "output" )
__lowercase = getattr(__magic_name__ , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
__lowercase = getattr(__magic_name__ , "attention" )
__lowercase = getattr(__magic_name__ , "output" )
__lowercase = getattr(__magic_name__ , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
__lowercase = getattr(__magic_name__ , "output" )
__lowercase = getattr(__magic_name__ , "dense" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["output", "LayerNorm"] )
__lowercase = getattr(__magic_name__ , "output" )
__lowercase = getattr(__magic_name__ , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
__lowercase = getattr(__magic_name__ , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
__lowercase = getattr(__magic_name__ , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
__lowercase = getattr(__magic_name__ , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
__lowercase = getattr(__magic_name__ , "intermediate" )
__lowercase = getattr(__magic_name__ , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("output" )
__lowercase = getattr(__magic_name__ , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
__lowercase = getattr(__magic_name__ , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
__lowercase = getattr(__magic_name__ , "weight" )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
__lowercase = ".".join(__magic_name__ )
if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , __magic_name__ ) or re.match(
R"(\S+)\.attention\.output\.dense\.weight" , __magic_name__ ):
__lowercase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__lowercase = array.transpose()
if pointer.shape == array.shape:
__lowercase = torch.from_numpy(__magic_name__ )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
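# Illustrative mapping (hypothetical variable paths) of what the tracing above
# is expected to produce for a TF2 BERT checkpoint:
#
#   model/layer_with_weights-4/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE
#       -> encoder.layer.0.attention.self.query.weight   (kernel arrays are transposed)
#   model/layer_with_weights-3/gamma/.ATTRIBUTES/VARIABLE_VALUE
#       -> embeddings.LayerNorm.weight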
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ) ->str:
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
__lowercase = BertConfig.from_json_file(__magic_name__ )
__lowercase = BertModel(__magic_name__ )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(__magic_name__ , __magic_name__ , __magic_name__ )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , __magic_name__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
_lowercase = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
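# Example invocation (hypothetical paths), assuming the script is saved as
# convert_bert_original_tf2_checkpoint_to_pytorch.py:
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_checkpoint \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin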
| 118
|
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
__lowercase = mock.Mock()
__lowercase = 500
__lowercase = {}
__lowercase = HTTPError
__lowercase = {}
# Download this model to make sure it's in the cache.
__lowercase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__lowercase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This checks that the fake head request was indeed called
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
__lowercase = mock.Mock()
__lowercase = 500
__lowercase = {}
__lowercase = HTTPError
__lowercase = {}
# Download this model to make sure it's in the cache.
__lowercase = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__lowercase = GPTaTokenizerFast.from_pretrained("gpt2" )
# This checks that the fake head request was indeed called
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
__lowercase = tempfile.mktemp()
with open(_lowerCamelCase , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , _lowerCamelCase )
__lowercase = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , _lowerCamelCase )
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
__lowercase = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class __a ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> List[Any]:
'''simple docstring'''
__lowercase = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase , repo_id="test-tokenizer" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase , repo_id="valid_org/test-tokenizer-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__lowercase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
__lowercase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't do an isinstance check because the CustomTokenizer class is loaded from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = os.path.join(_lowerCamelCase , "vocab.txt" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__lowercase = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
__lowercase = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't do an isinstance check because the CustomTokenizerFast class is loaded from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
__lowercase = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCamelCase , trust_remote_code=_lowerCamelCase )
# Can't do an isinstance check because the CustomTokenizer class is loaded from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class __a ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__lowercase = Trie()
__lowercase = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(_lowerCamelCase , ["AB", "C"] )
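# Minimal usage sketch of the Trie exercised above (same transformers
# `Trie` API: add() and split(); the token strings are illustrative):
#
#   trie = Trie()
#   trie.add("[CLS]")
#   trie.add("[SEP]")
#   trie.split("[CLS] hello [SEP]")  # -> ["[CLS]", " hello ", "[SEP]"]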
| 118
| 1
|
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__a = get_tests_dir('fixtures/dummy-config.json')
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = 0
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Tuple = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_UpperCAmelCase : Optional[int] = os.path.join(lowerCAmelCase__ , "fake-roberta" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(type(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
try:
AutoConfig.register("custom" , lowerCAmelCase__ )
# Wrong model type will raise an error
with self.assertRaises(lowerCAmelCase__ ):
AutoConfig.register("model" , lowerCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase__ ):
AutoConfig.register("bert" , lowerCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase : Optional[int] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
_UpperCAmelCase : Dict = AutoConfig.from_pretrained("bert-base" )
def _lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCAmelCase__ , revision="aaaaaa" )
def _lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase__ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
_UpperCAmelCase : Dict = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase__ ):
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase__ ):
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = '''new-model'''
try:
AutoConfig.register("new-model" , lowerCAmelCase__ )
# If remote code is not set, the default is to use local
_UpperCAmelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
_UpperCAmelCase : Tuple = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
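# Minimal sketch of the registration pattern exercised above (standard
# AutoConfig.register API; "my-model" is a hypothetical model type):
#
#   from transformers import AutoConfig, PretrainedConfig
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   AutoConfig.register("my-model", MyConfig)
#   config = AutoConfig.for_model("my-model")   # returns a MyConfig instance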
| 711
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = AltDiffusionPipeline
UpperCamelCase_ : int = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
def _lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
_UpperCAmelCase : Tuple = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
_UpperCAmelCase : Dict = CLIPTextModel(lowerCAmelCase__ )
_UpperCAmelCase : str = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_UpperCAmelCase : List[Any] = 7_7
_UpperCAmelCase : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]=0 ) -> Tuple:
"""simple docstring"""
if str(lowerCAmelCase__ ).startswith("mps" ):
_UpperCAmelCase : int = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCAmelCase : List[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
torch.manual_seed(0 )
_UpperCAmelCase : Tuple = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_UpperCAmelCase : int = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = text_encoder
_UpperCAmelCase : int = AltDiffusionPipeline(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Any = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = "A photo of an astronaut"
_UpperCAmelCase : Tuple = alt_pipe(**lowerCAmelCase__ )
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase : int = np.array(
[0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : str = self.get_dummy_components()
_UpperCAmelCase : Any = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
_UpperCAmelCase : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_UpperCAmelCase : List[Any] = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = text_encoder
_UpperCAmelCase : Any = AltDiffusionPipeline(**lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = alt_pipe(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase : Any = np.array(
[0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : Tuple = alt_pipe([prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="np" )
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Tuple = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
_UpperCAmelCase : Tuple = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Tuple = alt_pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="numpy" )
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : str = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
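# Minimal usage sketch for the pipeline under test (model id and prompt are
# taken from the slow tests above; the generation settings are illustrative):
#
#   import torch
#   from diffusers import AltDiffusionPipeline
#
#   pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion")
#   image = pipe(
#       "A painting of a squirrel eating a burger",
#       generator=torch.manual_seed(0),
#       num_inference_steps=20,
#   ).images[0]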
| 257
| 0
|
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Any:
lowercase : Optional[int] =VideoMAEConfig()
set_architecture_configs(__magic_name__ , __magic_name__ )
if "finetuned" not in model_name:
lowercase : str =False
if "finetuned" in model_name:
lowercase : Optional[int] ='''huggingface/label-files'''
if "kinetics" in model_name:
lowercase : Union[str, Any] =400
lowercase : List[Any] ='''kinetics400-id2label.json'''
elif "ssv2" in model_name:
lowercase : Union[str, Any] =174
lowercase : str ='''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
lowercase : str =json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase : str ={int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any =idalabel
lowercase : Any ={v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : List[Any] ) -> Optional[int]:
if "small" in model_name:
lowercase : Any =384
lowercase : Any =1536
lowercase : Dict =12
lowercase : Union[str, Any] =16
lowercase : Dict =12
lowercase : Any =3
lowercase : Optional[Any] =192
lowercase : Optional[int] =768
elif "large" in model_name:
lowercase : Any =1024
lowercase : int =4096
lowercase : Any =24
lowercase : Any =16
lowercase : List[str] =12
lowercase : Any =8
lowercase : List[str] =512
lowercase : List[str] =2048
elif "huge" in model_name:
lowercase : int =1280
lowercase : Any =5120
lowercase : int =32
lowercase : List[Any] =16
lowercase : List[Any] =12
lowercase : Optional[Any] =8
lowercase : Dict =640
lowercase : int =2560
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> Optional[int]:
if "encoder." in name:
lowercase : List[str] =name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
lowercase : List[str] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
lowercase : Union[str, Any] =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
lowercase : str =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowercase : int =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase : Optional[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
lowercase : List[Any] =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
lowercase : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
lowercase : Tuple =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
lowercase : List[Any] =name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
lowercase : List[str] =name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
lowercase : Any =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase : Union[str, Any] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase : Dict =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase : Optional[int] =name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
lowercase : Union[str, Any] =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
lowercase : List[str] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
lowercase : Dict =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowercase : Tuple =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowercase : Any =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
lowercase : int =name.replace('''head''' , '''classifier''' )
return name
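# Illustrative examples (hypothetical keys) of the renaming performed above:
#
#   "encoder.blocks.0.attn.proj.weight" -> "videomae.encoder.layer.0.attention.output.dense.weight"
#   "decoder.blocks.1.norm1.bias"       -> "decoder.decoder_layers.1.layernorm_before.bias"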
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Optional[Any] ) -> str:
for key in orig_state_dict.copy().keys():
lowercase : int =orig_state_dict.pop(__magic_name__ )
if key.startswith('''encoder.''' ):
lowercase : Optional[Any] =key.replace('''encoder.''' , '''''' )
if "qkv" in key:
lowercase : Union[str, Any] =key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
lowercase : Optional[int] =config.decoder_hidden_size
lowercase : Optional[int] =int(key_split[2] )
lowercase : List[Any] ='''decoder.decoder_layers.'''
if "weight" in key:
lowercase : Any =val[:dim, :]
lowercase : Optional[Any] =val[dim : dim * 2, :]
lowercase : int =val[-dim:, :]
else:
lowercase : Union[str, Any] =config.hidden_size
lowercase : List[Any] =int(key_split[1] )
lowercase : List[str] ='''videomae.encoder.layer.'''
if "weight" in key:
lowercase : str =val[:dim, :]
lowercase : str =val[dim : dim * 2, :]
lowercase : List[Any] =val[-dim:, :]
else:
lowercase : Any =val
return orig_state_dict
def _lowerCAmelCase ( ) -> Dict:
lowercase : int =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowercase : List[str] =np.load(__magic_name__ )
return list(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Optional[Any] ) -> Any:
lowercase : Any =get_videomae_config(__magic_name__ )
if "finetuned" in model_name:
lowercase : Union[str, Any] =VideoMAEForVideoClassification(__magic_name__ )
else:
lowercase : Tuple =VideoMAEForPreTraining(__magic_name__ )
# download original checkpoint, hosted on Google Drive
lowercase : List[str] ='''pytorch_model.bin'''
gdown.cached_download(__magic_name__ , __magic_name__ , quiet=__magic_name__ )
lowercase : int =torch.load(__magic_name__ , map_location='''cpu''' )
if "model" in files:
lowercase : str =files['''model''']
else:
lowercase : List[Any] =files['''module''']
lowercase : Optional[Any] =convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# verify model on basic input
lowercase : Optional[int] =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
lowercase : Any =prepare_video()
lowercase : Tuple =image_processor(__magic_name__ , return_tensors='''pt''' )
if "finetuned" not in model_name:
lowercase : Union[str, Any] =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowercase : Optional[int] =torch.load(__magic_name__ )
lowercase : Optional[int] =model(**__magic_name__ )
lowercase : Optional[Any] =outputs.logits
lowercase : Tuple =[
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowercase : List[Any] =torch.Size([1, 400] )
lowercase : Optional[Any] =torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
lowercase : Union[str, Any] =torch.Size([1, 174] )
lowercase : List[str] =torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
lowercase : List[Any] =torch.Size([1, 1408, 1536] )
lowercase : Optional[Any] =torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
lowercase : Dict =torch.Size([1, 1408, 1536] )
lowercase : int =torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowercase : Any =torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
lowercase : Tuple =torch.Size([1, 1408, 1536] )
lowercase : List[Any] =torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowercase : str =torch.Size([1, 400] )
lowercase : Any =torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowercase : Optional[int] =torch.Size([1, 400] )
lowercase : Optional[Any] =torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowercase : List[str] =torch.Size([1, 400] )
lowercase : List[Any] =torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
lowercase : Any =torch.Size([1, 400] )
lowercase : Optional[int] =torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
lowercase : int =torch.Size([1, 1408, 1536] )
lowercase : Union[str, Any] =torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowercase : Tuple =torch.Size([1, 174] )
lowercase : Union[str, Any] =torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
lowercase : List[Any] =torch.Size([1, 1408, 1536] )
lowercase : Dict =torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowercase : Optional[Any] =torch.Size([1, 174] )
lowercase : List[str] =torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __magic_name__ , atol=1E-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1E-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowercase : Optional[Any] =outputs.loss
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__magic_name__ )
model.save_pretrained(__magic_name__ )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(__magic_name__ , organization='''nielsr''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
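# Minimal inference sketch for a converted classification checkpoint (assumes
# the VideoMAEImageProcessor / VideoMAEForVideoClassification API used above;
# `model` and `image_processor` are the converted objects):
#
#   video = prepare_video()                               # 16 frames of the test clip
#   inputs = image_processor(video, return_tensors="pt")
#   logits = model(**inputs).logits
#   predicted = model.config.id2label[logits.argmax(-1).item()]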
| 92
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
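# Minimal sketch of the hybrid fallback implemented above (assuming the real
# class name DPTConfig): with is_hybrid=True and no backbone_config, a default
# BiT backbone config is created automatically.
#
#   config = DPTConfig(is_hybrid=True)   # logs "Initializing the config with a `BiT` backbone."
#   serialized = config.to_dict()        # backbone_config is serialized as a plain dict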
| 39
| 0
|
import numpy
class _UpperCamelCase :
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
UpperCamelCase_ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
UpperCamelCase_ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
UpperCamelCase_ = numpy.random.rand(3 , 1 )
# Real output values provided.
UpperCamelCase_ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
UpperCamelCase_ = numpy.zeros(output_array.shape )
def lowercase ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
UpperCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
UpperCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def lowercase ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
UpperCamelCase_ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
UpperCamelCase_ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
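# Sketch of the gradient computation implemented above (standard backprop for
# a sigmoid MLP with squared error; notation is illustrative, with X the input,
# H1/H2 the hidden activations, y_hat the prediction and W1/W2/W3 the weights):
#
#   delta3 = 2 * (y - y_hat) * sigmoid'(y_hat)
#   dW3 = H2.T @ delta3
#   dW2 = H1.T @ ((delta3 @ W3.T) * sigmoid'(H2))
#   dW1 = X.T  @ ( ((delta3 @ W3.T * sigmoid'(H2)) @ W2.T) * sigmoid'(H1) )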
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] ) -> Tuple:
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
UpperCamelCase_ = self.feedforward()
self.back_propagation()
if give_loss:
UpperCamelCase_ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = input_arr
UpperCamelCase_ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
UpperCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
UpperCamelCase_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase_ ( UpperCamelCase_ ) -> numpy.ndarray:
# Derivative of the sigmoid expressed in terms of the sigmoid's output:
# if value = sigmoid(x), then d(sigmoid)/dx = value * (1 - value).
return (value) * (1 - (value))
def lowerCAmelCase_ ( ) -> int:
UpperCamelCase_ = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
UpperCamelCase_ = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
UpperCamelCase_ = TwoHiddenLayerNeuralNetwork(
input_array=__SCREAMING_SNAKE_CASE , output_array=__SCREAMING_SNAKE_CASE )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__SCREAMING_SNAKE_CASE , iterations=10 , give_loss=__SCREAMING_SNAKE_CASE )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 701
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=() , UpperCamelCase_=None , UpperCamelCase_="no" , UpperCamelCase_="29500" ) -> Optional[Any]:
UpperCamelCase_ = False
UpperCamelCase_ = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
UpperCamelCase_ = True
elif "IPython" in sys.modules:
UpperCamelCase_ = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
UpperCamelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , UpperCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
UpperCamelCase_ = 8
UpperCamelCase_ = PrepareForLaunch(UpperCamelCase_ , distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(UpperCamelCase_ , args=UpperCamelCase_ , nprocs=UpperCamelCase_ , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*UpperCamelCase_ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=UpperCamelCase_ , master_addr="127.0.0.1" , master_port=UpperCamelCase_ , mixed_precision=UpperCamelCase_ ):
UpperCamelCase_ = PrepareForLaunch(UpperCamelCase_ , distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(UpperCamelCase_ , args=UpperCamelCase_ , nprocs=UpperCamelCase_ , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase_ = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*UpperCamelCase_ )
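# Minimal usage sketch for the launcher above (assuming the public accelerate
# name notebook_launcher; the training function must build its Accelerator
# internally, as enforced by the checks above):
#
#   def training_loop(mixed_precision="fp16"):
#       ...  # create Accelerator() and train here
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)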
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=() , UpperCamelCase_=2 ) -> Optional[Any]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=UpperCamelCase_ , master_addr="127.0.0.1" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ):
UpperCamelCase_ = PrepareForLaunch(UpperCamelCase_ , debug=UpperCamelCase_ )
start_processes(UpperCamelCase_ , args=UpperCamelCase_ , nprocs=UpperCamelCase_ , start_method="fork" )
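# Usage sketch: a minimal way to invoke a launcher like the one above from a
# notebook, assuming it mirrors `accelerate.notebook_launcher` (the training
# function and its arguments below are hypothetical placeholders):
#
#     from accelerate import notebook_launcher
#
#     def training_loop(mixed_precision="no"):
#         ...  # build model, optimizer and dataloaders, then train
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)
#
# CUDA must not be initialized before the call, because the worker processes
# are forked -- exactly the situation the RuntimeError above guards against.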
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs=None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text, **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
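# Usage sketch: a minimal round-trip, assuming the class above is the one
# shipped as `transformers.T5Tokenizer` ("t5-small" is only an illustrative
# checkpoint name):
def _t5_tokenizer_demo():
    tok = T5Tokenizer.from_pretrained("t5-small")
    ids = tok("translate English to German: Hello world").input_ids
    print(tok.convert_ids_to_tokens(ids))  # SentencePiece pieces, with "</s>" appended
    print(tok.decode(ids, skip_special_tokens=True))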
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ImageTextProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
"""Divide-and-conquer maximum of a list."""
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left]..nums[right]; negative indices are allowed."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
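# A small self-check of the recursion above: the range is split at the midpoint
# and the larger of the two halves' maxima wins (the demo itself is illustrative):
def _find_max_demo() -> None:
    nums = [3, 1, 4, 1, 5, 9, 2, 6]
    assert find_max(nums, 0, len(nums) - 1) == 9
    assert find_max(nums, -len(nums), -1) == 9  # negative indices are accepted too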
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        """simple docstring"""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        """simple docstring"""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
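# Usage sketch: the tests above all exercise the standard diffusers denoising
# loop; a hedged stand-alone version of that pattern (`model` stands in for any
# noise-predicting network, and the step count is illustrative):
def _kdpm_loop_sketch(model, sample):
    scheduler = KDPMaDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample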
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example) -> dict:
def choose_first(_lowerCAmelCase , _lowerCAmelCase=False ):
assert isinstance(_A , _A )
if len(_A ) == 1:
_UpperCAmelCase = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
_UpperCAmelCase = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
_UpperCAmelCase = {"id": example["id"]}
_UpperCAmelCase = example["annotations"]
_UpperCAmelCase = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
_UpperCAmelCase = ["yes"] if 1 in yes_no_answer else ["no"]
_UpperCAmelCase = _UpperCAmelCase = []
_UpperCAmelCase = _UpperCAmelCase = []
_UpperCAmelCase = ["<cls>"]
else:
_UpperCAmelCase = ["short"]
_UpperCAmelCase = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
_UpperCAmelCase = ["long"]
_UpperCAmelCase = choose_first(annotation["long_answer"] , is_long_answer=_A )
_UpperCAmelCase = []
answer.update(_A )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
_UpperCAmelCase = True
else:
_UpperCAmelCase = False
_UpperCAmelCase = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , _A ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def get_context_and_ans(example, assertion=False) -> dict:
_UpperCAmelCase = _get_single_answer(_A )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
_UpperCAmelCase = example["document"]["tokens"]
_UpperCAmelCase = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(_A ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
    # used later to drop all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
_UpperCAmelCase = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
_UpperCAmelCase = example["document"]["tokens"]
_UpperCAmelCase = answer["start_token"]
_UpperCAmelCase = answer["end_token"]
_UpperCAmelCase = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
_UpperCAmelCase = " ".join(context[start_token:end_token] )
    # sanity-check the context reconstruction above
if assertion:
_UpperCAmelCase = doc["is_html"][answer["start_token"] : answer["end_token"]]
_UpperCAmelCase = doc["token"][answer["start_token"] : answer["end_token"]]
_UpperCAmelCase = " ".join([old[i] for i in range(len(_A ) ) if not is_html[i]] )
if new != old:
print("ID:" , example["id"] )
print("New:" , _A , end="\n" )
print("Old:" , _A , end="\n\n" )
return {
"context": " ".join(_A ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True) -> dict:
_UpperCAmelCase = get_context_and_ans(_A , assertion=_A )
_UpperCAmelCase = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
_UpperCAmelCase = tokenizer(example["question"]["text"] , out["context"] ).input_ids
_UpperCAmelCase = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = input_ids[:q_len]
_UpperCAmelCase = range(_A , len(_A ) , max_length - doc_stride )
for i in doc_start_indices:
_UpperCAmelCase = i + max_length - q_len
_UpperCAmelCase = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_A ),
"end_token": [-100] * len(_A ),
"category": category,
},
}
_UpperCAmelCase = out["context"].split()
_UpperCAmelCase = splitted_context[answer["end_token"]]
_UpperCAmelCase = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=_A , ).input_ids )
_UpperCAmelCase = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=_A ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
_UpperCAmelCase = len(tokenizer(_A , add_special_tokens=_A ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
_UpperCAmelCase = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
_UpperCAmelCase = answer["start_token"]
_UpperCAmelCase = answer["end_token"]
if assertion:
_UpperCAmelCase = tokenizer.decode(_A )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" , answer["span"] )
print("NEW:" , _A , end="\n\n" )
if len(_A ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
_UpperCAmelCase = input_ids[:q_len]
_UpperCAmelCase = range(_A , len(_A ) , max_length - doc_stride )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = [] # null, yes, no, long, short
for i in doc_start_indices:
_UpperCAmelCase = i + max_length - q_len
_UpperCAmelCase = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
_UpperCAmelCase = start_token - i + q_len
_UpperCAmelCase = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
_UpperCAmelCase = -100
_UpperCAmelCase = -100
answers_category.append("null" )
_UpperCAmelCase = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_A )
answers_end_token.append(_A )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"] )
print("New:" , tokenizer.decode(_A ) )
print("Old:" , tokenizer.decode(_A ) , end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples that have no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the null-category samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__lowerCAmelCase = load_dataset("natural_questions")
__lowerCAmelCase = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
__lowerCAmelCase = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
__lowerCAmelCase = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
__lowerCAmelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__lowerCAmelCase = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
__lowerCAmelCase = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
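# A small sketch of consuming the cache written above (purely illustrative):
#
#     import jsonlines
#     with jsonlines.open("nq-validation.jsonl") as reader:
#         for sample in reader:
#             print(sample["category"], len(sample["input_ids"]))
#             break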
_SCREAMING_SNAKE_CASE : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_SCREAMING_SNAKE_CASE : int = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_SCREAMING_SNAKE_CASE : Optional[Any] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_SCREAMING_SNAKE_CASE : Tuple = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_SCREAMING_SNAKE_CASE : Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_SCREAMING_SNAKE_CASE : str = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_SCREAMING_SNAKE_CASE : Optional[int] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
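# All of the schedules above are strictly decreasing timestep sequences that end
# at 0; a small self-check sketch (the helper name is illustrative, not part of
# any library API):
def _validate_schedule(schedule):
    assert schedule[-1] == 0, "schedules are expected to end at timestep 0"
    assert all(a > b for a, b in zip(schedule, schedule[1:])), "schedule must be strictly decreasing"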
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version of transformers in one file."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to main-branch docs with stable-release links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
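# Typical invocations of this release helper (assuming it is run as a
# stand-alone script from the repository root; the file name is illustrative):
#
#     python release.py                 # minor release: bump versions everywhere
#     python release.py --patch         # patch release: bump the micro version only
#     python release.py --post_release  # after the release: move back to a .dev0 version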
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        '''simple docstring'''
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ) -> BatchFeature:
'''simple docstring'''
if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
SCREAMING_SNAKE_CASE = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_UpperCamelCase ) == 0:
if return_attention_mask:
SCREAMING_SNAKE_CASE = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
SCREAMING_SNAKE_CASE = required_input[0]
if isinstance(_UpperCamelCase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
SCREAMING_SNAKE_CASE = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "tf"
elif is_torch_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "pt"
elif isinstance(_UpperCamelCase , (int, float, list, tuple, np.ndarray) ):
SCREAMING_SNAKE_CASE = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(_UpperCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
SCREAMING_SNAKE_CASE = to_numpy(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = [to_numpy(_UpperCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
SCREAMING_SNAKE_CASE = self._get_padding_strategies(padding=_UpperCamelCase , max_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if not all(len(_UpperCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
SCREAMING_SNAKE_CASE = []
for i in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = {k: v[i] for k, v in processed_features.items()}
# truncation
SCREAMING_SNAKE_CASE = self._truncate(
_UpperCamelCase , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , truncation=_UpperCamelCase , )
truncated_inputs.append(_UpperCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
SCREAMING_SNAKE_CASE = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE = {}
for i in range(_UpperCamelCase ):
# padding
SCREAMING_SNAKE_CASE = self._pad(
truncated_inputs[i] , max_length=_UpperCamelCase , padding_strategy=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
SCREAMING_SNAKE_CASE = []
if value.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE = value.astype(np.floataa )
batch_outputs[key].append(_UpperCamelCase )
return BatchFeature(_UpperCamelCase , tensor_type=_UpperCamelCase )
    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_UpperCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
SCREAMING_SNAKE_CASE = np.ones(len(_UpperCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = max_length - len(_UpperCamelCase )
if self.padding_side == "right":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (0, difference) )
SCREAMING_SNAKE_CASE = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (difference, 0) )
SCREAMING_SNAKE_CASE = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) > max_length
if needs_to_be_truncated:
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
SCREAMING_SNAKE_CASE = processed_features["attention_mask"][:max_length]
return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        '''simple docstring'''
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
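# Usage sketch: a minimal padding example, assuming the class above is the base
# of concrete extractors such as `transformers.Wav2Vec2FeatureExtractor` (that
# mapping is an assumption; the parameter values are illustrative):
def _pad_demo():
    import numpy as np
    from transformers import Wav2Vec2FeatureExtractor

    fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    features = {"input_values": [np.ones(8, dtype=np.float32), np.ones(5, dtype=np.float32)]}
    batch = fe.pad(features, padding=True, return_tensors="np", return_attention_mask=True)
    print(batch["input_values"].shape)      # (2, 8) -- padded up to the longest item
    print(batch["attention_mask"].sum(-1))  # [8 5] -- true lengths kept in the mask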
"""simple docstring"""
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
return abs(__lowercase ) if a == 0 else greatest_common_divisor(b % a , __lowercase )
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
lowerCamelCase__ , lowerCamelCase__ = y, x % y
return abs(__lowercase )
def _A ( ):
"""simple docstring"""
try:
lowerCamelCase__ = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
lowerCamelCase__ = int(nums[0] )
lowerCamelCase__ = int(nums[1] )
print(
f"""greatest_common_divisor({num_a}, {num_a}) = """
f"""{greatest_common_divisor(__lowercase , __lowercase )}""" )
print(f"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowercase , __lowercase )}""" )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
"""simple docstring"""
__magic_name__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def _A ( __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = [False] * len(__lowercase )
lowerCamelCase__ = [s]
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def _A ( __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = [-1] * (len(__lowercase ))
lowerCamelCase__ = 0
lowerCamelCase__ = []
lowerCamelCase__ = [i[:] for i in graph] # Record original cut, copy.
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
lowerCamelCase__ = float("""Inf""" )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(__lowercase , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
for i in range(len(__lowercase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
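# Worked example: on `test_graph` with source 0 and sink 5, the method pushes a
# total flow of 23 and saturates the edges (1, 3), (4, 3) and (4, 5), which
# separate the sink from the source:
def _mincut_demo() -> None:
    graph = [row[:] for row in test_graph]  # mincut mutates its input, so copy it
    print(mincut(graph, source=0, sink=5))  # [(1, 3), (4, 3), (4, 5)]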
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points in [-1, 1]^2 and counting hits in the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area of the circle to the area of the square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo mean-value estimate of the integral of `function_to_integrate`."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator on y = x, whose integral has a closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_faster()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_fastest()" , setup=lowerCAmelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
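# Worked examples for the three checks above; a pangram uses every letter of
# the alphabet at least once:
def _pangram_demo() -> None:
    assert is_pangram()  # the default sentence is the classic pangram
    assert is_pangram_faster("Sphinx of black quartz, judge my vow")
    assert not is_pangram_fastest("hello world")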
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
"""simple docstring"""
def __init__( self : str ,__A : Optional[int] ,__A : str=2 ,__A : List[str]=3 ,__A : List[Any]=4 ,__A : List[Any]=2 ,__A : List[str]=7 ,__A : Dict=True ,__A : Union[str, Any]=True ,__A : Optional[Any]=True ,__A : Optional[Any]=True ,__A : int=99 ,__A : str=36 ,__A : Tuple=2 ,__A : int=4 ,__A : int=37 ,__A : Union[str, Any]="gelu" ,__A : str=0.1 ,__A : Union[str, Any]=0.1 ,__A : Any=512 ,__A : Optional[int]=16 ,__A : List[Any]=2 ,__A : int=0.02 ,__A : Union[str, Any]=6 ,__A : Dict=6 ,__A : str=3 ,__A : Optional[Any]=4 ,__A : str=None ,__A : str=1000 ,) -> Dict:
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = image_size
_lowercase = patch_size
_lowercase = is_training
_lowercase = use_input_mask
_lowercase = use_token_type_ids
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = type_sequence_label_size
_lowercase = initializer_range
_lowercase = coordinate_size
_lowercase = shape_size
_lowercase = num_labels
_lowercase = num_choices
_lowercase = scope
_lowercase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowercase = text_seq_length
_lowercase = (image_size // patch_size) ** 2 + 1
_lowercase = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Dict = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def __UpperCAmelCase ( self : List[Any] ,__A : str ,__A : Any ,__A : Optional[int] ,__A : Tuple ,__A : str ) -> Any:
return True
def __UpperCAmelCase ( self : Dict ,__A : Union[str, Any] ,__A : Tuple ,__A : Optional[Any]=False ) -> dict:
_lowercase = copy.deepcopy(__A )
if model_class in get_values(__A ):
_lowercase = {
k: tf.tile(tf.expand_dims(__A ,1 ) ,(1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__A ,tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
_lowercase = tf.ones(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(__A ):
_lowercase = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
_lowercase = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(__A ):
_lowercase = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(__A ):
_lowercase = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=tf.intaa )
return inputs_dict
def __UpperCAmelCase ( self : int ) -> Tuple:
_lowercase = TFLayoutLMvaModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,hidden_size=37 )
def __UpperCAmelCase ( self : List[str] ) -> Any:
self.config_tester.run_common_tests()
    def test_loss_computation( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model ,'hf_compute_loss' ,None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() ,model_class ,return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() ,reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() ,model_class ,return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                loss = model(input_ids ,**prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() ,model_class ,return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels )
                        loss = model(input_ids ,**prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() ,model_class ,return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() ,model_class ,return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model( self ) -> None:
        (config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config ,input_ids ,bbox ,pixel_values ,token_type_ids ,input_mask )
    def test_model_various_embeddings( self ) -> None:
        (config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config ,input_ids ,bbox ,pixel_values ,token_type_ids ,input_mask )
    def test_for_sequence_classification( self ) -> None:
        (config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config ,input_ids ,bbox ,pixel_values ,token_type_ids ,input_mask ,sequence_labels )
    def test_for_token_classification( self ) -> None:
        (config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config ,input_ids ,bbox ,pixel_values ,token_type_ids ,input_mask ,token_labels )
    def test_for_question_answering( self ) -> None:
        (config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config ,input_ids ,bbox ,pixel_values ,token_type_ids ,input_mask ,sequence_labels )
@slow
    def test_model_from_pretrained( self ) -> None:
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
class A_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head( self ) -> None:
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image ,return_tensors='tf' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) ,axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids ,bbox=bbox ,pixel_values=pixel_values ,training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,expected_slice ,atol=1e-4 ) )
| 67
|
def prefix_function(snake_case__ :str ) -> list:
    # prefix_result[i] is the length of the longest proper prefix of
    # snake_case__[: i + 1] that is also a suffix (Knuth-Morris-Pratt).
    prefix_result = [0] * len(snake_case__ )
    for i in range(1 , len(snake_case__ ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and snake_case__[i] != snake_case__[j]:
            j = prefix_result[j - 1]
        if snake_case__[i] == snake_case__[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(snake_case__ :str ) -> int:
    return max(prefix_function(snake_case__ ) )
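# Worked example (illustrative, not from the original file):
# prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3], so
# longest_prefix("aabaaab") == 3 -- the border "aab" is both a prefix and a suffix.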
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''
    destination_vertex : int
    weight : int
class AdjacencyList:
    '''simple docstring'''
    def __init__( self , size : int ):
        '''simple docstring'''
        self._graph : list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex : int ) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        '''simple docstring'''
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int ):
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex : int , finish_vertex : int ) -> int:
        '''simple docstring'''
        # 0-1 BFS: a deque stands in for Dijkstra's priority queue because
        # edge weights are restricted to 0 and 1.
        queue = deque([start_vertex] )
        distances : list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    # 0-weight edges do not increase the distance, so expand them first
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
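# Minimal usage sketch (illustrative, not part of the original module):
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)
#   g.add_edge(1, 2, 1)
#   g.get_shortest_path(0, 2)  # -> 1: the 0-weight edge is free, the 1-weight edge costs 1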
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
    if not isinstance(lowerCAmelCase , int ):
        msg = F"""Input value of [number={lowerCAmelCase}] must be an integer"""
        raise TypeError(msg )
    if lowerCAmelCase < 1:
        msg = F"""Input value of [number={lowerCAmelCase}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , lowerCAmelCase ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
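# Illustrative values (assumed): inputs 1..6 yield the Catalan numbers 1, 1, 2, 5, 14, 42,
# computed iteratively via the recurrence C(i) = C(i - 1) * (4 * i - 2) // (i + 1).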
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669
| 0
|
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase : Any = tuple[int, int]
class Graph:
    """simple docstring"""
    def __init__( self , vertices :set[int] , edges :Mapping[EdgeT, int] ):
        '''simple docstring'''
        self.vertices : set[int] = vertices
        self.edges : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge :EdgeT , weight :int ):
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ):
        '''simple docstring'''
        # grow a minimum spanning tree outward from an arbitrary start vertex
        subgraph : Graph = Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution( filename : str = "p107_network.txt" ) -> int:
    script_dir : str = os.path.abspath(os.path.dirname(__file__ ) )
    network_file : str = os.path.join(script_dir , filename )
    edges : dict[EdgeT, int] = {}
    with open(network_file ) as f:
        data = f.read().strip().split("\n" )
    adjacency_matrix = [line.split("," ) for line in data]
    for edge1 in range(1 , len(adjacency_matrix ) ):
        for edge2 in range(edge1 ):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2] )
    graph : Graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph : Graph = graph.prims_algorithm()
    initial_total : int = sum(graph.edges.values() )
    optimal_total : int = sum(subgraph.edges.values() )
    return initial_total - optimal_total
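# Rough sanity sketch (figures assumed from the published Project Euler 107 data):
# the 40-vertex network weighs 261832 in total, its minimum spanning tree 2153,
# so the saving printed below should be 259679.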
if __name__ == "__main__":
print(F'''{solution() = }''')
| 454
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest( unittest.TestCase ):
    image_processor_class = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self ) -> None:
        '''simple docstring'''
        self.image_size = (3, 32, 1_28)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 1_28},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ) -> None:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_input = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
        return image_input
    def test_save_load_pretrained_default( self ) -> None:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ) -> None:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "test"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_char_decode( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        decode_strs = [seq.replace(" " , "" ) for seq in decoded_tok]
        self.assertListEqual(decode_strs , decoded_processor )
    def test_model_input_names( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
    def test_processor_batch_decode( self ) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor )
        char_input = torch.randn(1 , 27 , 38 )
        bpe_input = torch.randn(1 , 27 , 5_02_57 )
        wp_input = torch.randn(1 , 27 , 3_05_22 )
        results = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 642
| 0
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 714
|
'''simple docstring'''
def solution( max_perimeter : int = 10**9 ) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
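# Hedged note: this appears to be the Project Euler 94 search (sum of perimeters of
# almost-equilateral Heronian triangles up to 10**9); the Pell-style recurrence on
# value/prev_value enumerates the valid side lengths instead of scanning every perimeter.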
if __name__ == "__main__":
print(f'''{solution() = }''')
| 610
| 0
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:int ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
pass # TODO add if relevant
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
    def test_pickle_mecab_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
        self.assertIsNotNone(tokenizer )
        text = '''こんにちは、世界。\nこんばんは、世界。'''
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(filename , '''wb''' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , '''rb''' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
    def test_mecab_tokenizer_ipadic( self ):
        tokenizer = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def test_mecab_tokenizer_unidic_lite( self ):
        try:
            tokenizer = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def test_mecab_tokenizer_unidic( self ):
        try:
            tokenizer = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def test_mecab_tokenizer_lower( self ):
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def test_mecab_tokenizer_with_option( self ):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False , normalize_text=False , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
    def test_mecab_tokenizer_no_normalize( self ):
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
        self.assertIsNotNone(tokenizer )
        text = '''こんにちは、世界。\nこんばんは、世界。'''
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(filename , '''wb''' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , '''rb''' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_sudachi
    def test_sudachi_tokenizer_core( self ):
        tokenizer = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A( self ):
        tokenizer = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B( self ):
        tokenizer = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C( self ):
        tokenizer = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
    def test_sudachi_tokenizer_lower( self ):
        tokenizer = SudachiTokenizer(do_lower_case=True , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize( self ):
        tokenizer = SudachiTokenizer(normalize_text=False , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace( self ):
        tokenizer = SudachiTokenizer(trim_whitespace=True , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
        self.assertIsNotNone(tokenizer )
        text = '''こんにちは、世界。\nこんばんは、世界。'''
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(filename , '''wb''' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , '''rb''' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_jumanpp
    def test_jumanpp_tokenizer( self ):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower( self ):
        tokenizer = JumanppTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize( self ):
        tokenizer = JumanppTokenizer(normalize_text=False )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace( self ):
        tokenizer = JumanppTokenizer(trim_whitespace=True )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext( self ):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
        self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
        self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
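    # Greedy longest-match-first behaviour (illustrative): "こんばんは" is absent from
    # the vocab, so the tokenizer emits "こん" plus the continuation piece "##ばんは";
    # a span with no match at all falls back to the "[UNK]" token.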
    def test_sentencepiece_tokenizer( self ):
        tokenizer = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
        self.assertListEqual(tokens , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
        tokens = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
        self.assertListEqual(tokens , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
        text = tokenizer.encode('''ありがとう。''' , add_special_tokens=False )
        text_a = tokenizer.encode('''どういたしまして。''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
        return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self:Any ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:str ):
pass # TODO add if relevant
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
        tokens = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
        self.assertListEqual(
            tokens , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
    def test_character_tokenizer( self ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
        text = tokenizer.encode('''ありがとう。''' , add_special_tokens=False )
        text_a = tokenizer.encode('''どういたしまして。''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
    def test_tokenizer_bert_japanese( self ):
        model_name = '''cl-tohoku/bert-base-japanese'''
        tokenizer = AutoTokenizer.from_pretrained(model_name )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
    def test_tokenizer_mismatch_warning( self ):
        model_name = '''cl-tohoku/bert-base-japanese'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertTokenizer.from_pretrained(model_name )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
        model_name = '''bert-base-cased'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertJapaneseTokenizer.from_pretrained(model_name )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 33
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            '''num_train_timesteps''': 2_01,
            '''sigma_min''': 0.002,
            '''sigma_max''': 80.0,
        }
        config.update(**kwargs )
        return config
    def test_step_shape( self ):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_clip_denoised( self ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
    def test_full_loop_with_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_06, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 33
| 1
|
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left( sorted_collection , item , lo = 0 , hi = -1 ):
    """simple docstring"""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection , item , lo = 0 , hi = -1 ):
    """simple docstring"""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection , item , lo = 0 , hi = -1 ):
    """simple docstring"""
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection , item , lo = 0 , hi = -1 ):
    """simple docstring"""
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection , item ):
    """simple docstring"""
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection , item ):
    """simple docstring"""
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection , item , left , right ):
    """simple docstring"""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
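# Worked example (illustrative): in [0, 5, 7, 10, 15] the value 5 sits at index 1,
# so bisect_left(..., 5) == 1 while bisect_right(..., 5) == 2 (the insertion point
# just past the existing occurrence); binary_search(..., 5) likewise returns 1.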
if __name__ == "__main__":
UpperCAmelCase : int = input('Enter numbers separated by comma:\n').strip()
UpperCAmelCase : int = sorted(int(item) for item in user_input.split(','))
UpperCAmelCase : str = int(input('Enter a single number to be found in the list:\n'))
UpperCAmelCase : List[Any] = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 47
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase : str = logging.getLogger(__name__)
UpperCAmelCase : Dict = 5_0 # max width of layer names
UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names
def add_arguments( parser ):
    """simple docstring"""
    group = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=int , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=int , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=str , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=str , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=str , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=None , type=float , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=float , help='''clip gelu output maximum value to N''' )
    group.add_argument(
        '''--recalibrate-weights''' , action='''store_true''' , help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ) , )
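# Hypothetical wiring sketch (everything except add_arguments is an assumption):
# import argparse
# parser = argparse.ArgumentParser()
# add_arguments(parser)
# args = parser.parse_args(["--calibrator", "percentile", "--percentile", "99.99"])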
def set_default_quantizers( args ):
    """simple docstring"""
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model( model , args , calib=False , eval=False ):
    """simple docstring"""
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model )
def enable_calibration( model ):
    """simple docstring"""
    logger.info('''Enabling Calibration''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''' )
def finish_calibration( model , args ):
    """simple docstring"""
    logger.info('''Loading calibrated amax''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('''percentile''' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
    """simple docstring"""
    def fuse3(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
    for name, mod in model.named_modules():
        if name.endswith('''.attention.self''' ):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
            fuse3(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu( model , maxval ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax( model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights( model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary( model , name_width=25 , line_width=180 , ignore=None ):
    """simple docstring"""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , '''weight''' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '''_input_quantizer''' , None )
        weight_q = getattr(mod , '''_weight_quantizer''' , None )
        if not hasattr(mod , '''weight''' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        s = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f'''{name:{name_width}} {act_str}''' )
            logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def print_quant_summary( model ):
    """simple docstring"""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f'''{name:80} {mod}''' )
            count += 1
    print(f'''{count} TensorQuantizers found in model''' )
def set_quantizer( name , mod , quantizer , k , v ):
    """simple docstring"""
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'''{name} has no {quantizer}''' )
def set_quantizers( name , mod , which='''both''' , **kwargs ):
    """simple docstring"""
    s = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += f''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name , mod , '''_input_quantizer''' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
    logger.info(s )
def set_quantizer_by_name( model , names , **kwargs ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
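# Illustrative call (assumed usage, mirroring configure_model above): disable every
# quantizer whose module name mentions layernorm.
# set_quantizer_by_name(model, ["layernorm"], _disabled=True)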
| 47
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=12 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , hidden_size=32 , projection_dim=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , bos_token_id=0 , scope=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = TFBlipTextModel(config=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , training=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
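# Sketch (an addition, not part of the test): the loop in prepare_config_and_inputs
# above turns a random 0/1 mask into a "ones then zeros" mask by picking a random
# cut-off column per row, guaranteeing at least one attended token per batch.
def _demo_attention_mask(batch_size=2, seq_length=6):
    mask = np.zeros((batch_size, seq_length), dtype=np.int32)
    starts = np.random.randint(1, seq_length - 1, size=(batch_size,))
    for row, start in enumerate(starts):
        mask[row, :start] = 1  # attended positions before the cut-off
    return mask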
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 469
|
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
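# Alternative sketch (an addition, not in the original solution): because only
# the last ten digits matter, three-argument pow keeps every term small.
def solution_mod() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    return str(total).zfill(10)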
| 469
| 1
|
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    """Configuration for training the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate schedule type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on HumanEval dataset."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing the dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[int] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[int] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    vocab_size: Optional[int] = field(default=200_000, metadata={"help": "Vocabulary size of the new tokenizer."})
    n_examples: Optional[int] = field(
        default=32_768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
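# Usage sketch (assumption, not part of the original file): these dataclasses
# are designed for transformers.HfArgumentParser, which turns every field into
# a command-line flag:
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   (args,) = parser.parse_args_into_dataclasses()  # e.g. --model_ckpt gpt2
#   print(args.train_batch_size)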
| 704
|
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Initialise the cipher with an optional default key."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt `content` character by character, returning a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt a list of chars; XOR with the same key inverts encrypt()."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
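# Quick self-check (an addition, assuming the class above): XOR with the same
# key is an involution, (c ^ key) ^ key == c, so a round trip is the identity.
if __name__ == "__main__":
    cipher = XORCipher(key=67)
    assert cipher.decrypt_string(cipher.encrypt_string("hallo welt")) == "hallo welt"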
| 54
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list of Node objects."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
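# Usage sketch (illustrative addition):
#   stack = LinkedStack[int]()
#   stack.push(1)
#   stack.push(2)
#   assert str(stack) == "2->1" and len(stack) == 2
#   assert stack.pop() == 2 and stack.peek() == 1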
| 130
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
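# Worked example (illustration only) of the fairseq offset bookkeeping above:
# ids 0-3 are reserved ("<s>NOTUSED", "<pad>", "</s>NOTUSED", "<unk>"), so a
# SentencePiece piece with id 10 becomes token id 10 + 4 = 14, while sp's own
# unk piece (id 0) is redirected to the tokenizer's unk_token_id instead of
# colliding with "<s>NOTUSED".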
| 192
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
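# Minimal equivalent (an assumption, not the diffusers helper itself) of the
# assert_mean_pixel_difference check imported above, for uint8-range arrays:
def _mean_pixel_difference_ok(image, expected_image, expected_max_diff=10.0):
    image = np.asarray(image, dtype=np.float64)
    expected_image = np.asarray(expected_image, dtype=np.float64)
    return float(np.abs(image - expected_image).mean()) < expected_max_diff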
| 711
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
        # fmt: off
        expected_encoding = {
            '''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='''microsoft/speecht5_asr''', revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''', sequences=sequences, )
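# Sketch (an addition, not from the test suite): SPIECE_UNDERLINE ("\u2581")
# marks word boundaries in the char-level pieces asserted above, so
# detokenization for such char tokenizers amounts to:
#   "".join(tokens).replace("\u2581", " ").strip()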
| 561
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object detection pipeline predicting bounding boxes and classes for objects in an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn a [xmin, ymin, xmax, ymax] tensor into a {"xmin": ..., ...} dict."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
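# Standalone arithmetic (an illustrative sketch, not part of the pipeline) for
# the unnormalize helper above: LayoutLM-style boxes live in a 0-1000 normalized
# space and are scaled back to pixel coordinates of the original image.
def _unnormalize_box(bbox, width, height):
    xmin, ymin, xmax, ymax = bbox
    return {
        "xmin": int(width * xmin / 1000),
        "ymin": int(height * ymin / 1000),
        "xmax": int(width * xmax / 1000),
        "ymax": int(height * ymax / 1000),
    }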
| 264
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
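# Usage note (illustrative, not part of this file): with the _LazyModule above,
# importing the package is cheap; heavy backends load lazily on attribute
# access, e.g.
#   from transformers.models import mobilebert
#   model_cls = mobilebert.MobileBertModel  # torch is imported here, not before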
| 264
| 1
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape, with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
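# Sanity sketch for the helpers above (shapes illustrative; an addition, not in
# the original test file):
#   ids = ids_tensor((2, 5), vocab_size=99)   # int32 ndarray of shape (2, 5)
#   mask = random_attention_mask((2, 5))      # 0/1 ndarray, last column all 1s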
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case :int = self._get_input_ids_and_config()
__snake_case :Dict = False
__snake_case :Tuple = max_length
for model_class in self.all_generative_model_classes:
__snake_case :Any = model_class(a__ )
__snake_case :Dict = model.generate(a__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a__ )
__snake_case :List[str] = jit(model.generate )
__snake_case :List[Any] = jit_generate(a__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case :Tuple = self._get_input_ids_and_config()
__snake_case :Any = True
__snake_case :Dict = max_length
for model_class in self.all_generative_model_classes:
__snake_case :List[str] = model_class(a__ )
__snake_case :Any = model.generate(a__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a__ )
__snake_case :int = jit(model.generate )
__snake_case :Tuple = jit_generate(a__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case :Tuple = self._get_input_ids_and_config()
__snake_case :Optional[int] = False
__snake_case :Optional[Any] = max_length
__snake_case :Tuple = 2
for model_class in self.all_generative_model_classes:
__snake_case :List[Any] = model_class(a__ )
__snake_case :Union[str, Any] = model.generate(a__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a__ )
__snake_case :List[str] = jit(model.generate )
__snake_case :str = jit_generate(a__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
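# Illustrative sketch (added; not part of the original test file): the attention-mask
# tests above "left-pad" by zeroing the first position with JAX's functional array
# updates. JAX arrays are immutable, so `.at[...].set(...)` returns a new array.
import jax.numpy as jnp

_attention_mask = jnp.ones((2, 5), dtype="i4")
_attention_mask = _attention_mask.at[(0, 0)].set(0)  # mask out the first token of sequence 0
print(_attention_mask)  # [[0 1 1 1 1], [1 1 1 1 1]]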
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
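# Usage sketch (added for illustration; assumes the upstream `transformers` names
# restored above — AlignTextConfig, AlignVisionConfig, AlignConfig):
if __name__ == "__main__":
    text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
    vision_config = AlignVisionConfig(image_size=600)
    config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
    print(config.to_dict()["projection_dim"])  # 640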
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
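# Quick illustration (added; assumes the upstream `transformers.Swinv2Config` API):
# the channel dimension doubles at each of the len(depths) - 1 stage transitions,
# so hidden_size = embed_dim * 2 ** (len(depths) - 1).
if __name__ == "__main__":
    config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.hidden_size == 96 * 2**3  # 768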
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
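# Standalone sketch (added; not part of the test file): the values asserted in
# test_variance follow from the DDPM posterior variance for a linear beta schedule,
# var(t) = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t.
if __name__ == "__main__":
    betas = torch.linspace(0.0001, 0.02, 1000)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

    def _variance(t):
        alpha_prod_t = alphas_cumprod[t]
        alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
        return (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * betas[t]

    print(_variance(0), _variance(487), _variance(999))  # ~0.0, ~0.00979, ~0.02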
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/rembert''': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
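# Illustration (added; a plain-Python sketch of the special-token layout built above):
# single sequences become [CLS] A [SEP]; pairs become [CLS] A [SEP] B [SEP], and the
# special-tokens mask marks exactly the added positions with 1.
def _special_tokens_mask(len_a, len_b=None):
    if len_b is None:
        return [1] + [0] * len_a + [1]
    return [1] + [0] * len_a + [1] + [0] * len_b + [1]


assert _special_tokens_mask(3) == [1, 0, 0, 0, 1]
assert _special_tokens_mask(3, 2) == [1, 0, 0, 0, 1, 0, 0, 1]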
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all positional n-grams (substrings of length `ngram_size`) of a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
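    # Example (added for illustration): character bigrams of a short string
    print(create_ngram("I am a sentence", 2))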
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
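# Self-contained sketch (added for illustration): the merge above is simply
# W <- W0 + alpha * (up @ down), shown here on a toy linear layer.
def _demo_lora_merge():
    layer = torch.nn.Linear(4, 4, bias=False)
    weight_up = torch.randn(4, 2)
    weight_down = torch.randn(2, 4)
    alpha = 0.75
    layer.weight.data += alpha * torch.mm(weight_up, weight_down)  # W = W0 + alpha * deltaW
    return layer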
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
from ..utils import DummyObject, requires_backends
class __UpperCamelCase(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
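# Quick check (added for illustration): the closed form h(n) = n * (2n - 1)
# gives 0, 1, 6, 15, 28 for n = 0..4.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]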
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
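# Example invocation (added for illustration; the script filename and paths are
# placeholders, not taken from the original file):
# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/t5/checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path ./t5-pytorch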
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=1,
        act_fn="silu",
        latent_channels=3,
        sample_size=32,
        num_vq_embeddings=256,
        norm_num_groups=32,
        vq_embed_dim=None,
        scaling_factor=0.18215,
        norm_type="group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )
@apply_forward_hook
    def encode(self, x, return_dict=True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)
@apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self, sample, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
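# Round-trip sketch (added; assumes the upstream diffusers VQModel defaults above):
if __name__ == "__main__":
    model = VQModel()
    x = torch.randn(1, 3, 32, 32)
    out = model(x).sample
    assert out.shape == x.shape  # encode -> quantize -> decode preserves the shape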
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
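# Example usage (added for illustration):
if __name__ == "__main__":
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 5))  # 6th-smallest element -> 54
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 3))  # median (index len//2) -> 7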
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier (linear or RBF kernel)."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
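    # Usage sketch (added; a small linearly separable example under the restored
    # class name SVC above):
    xs = [np.array([0.0, 1.0]), np.array([0.0, 2.0]), np.array([1.0, 1.0]), np.array([1.0, 2.0])]
    ys = np.array([1, 1, -1, -1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.array([0.0, 1.0])))  # expected: 1
    print(svc.predict(np.array([1.0, 1.0])))  # expected: -1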
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
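# Minimal usage sketch (added for illustration; the tiny checkpoint below is the one
# exercised by the tests in this file and requires network access to download):
#
#   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#   print(generator("Hello I believe in", do_sample=False)[0]["generated_text"])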
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}])

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
def __UpperCAmelCase ( self : int , lowercase_ : str , lowercase_ : int) -> List[str]:
"""simple docstring"""
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator("This is a test")
self.assertEqual(lowercase_ , [{"generated_text": ANY(lowercase_)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
_UpperCamelCase = text_generator("This is a test" , return_full_text=lowercase_)
self.assertEqual(lowercase_ , [{"generated_text": ANY(lowercase_)}])
self.assertNotIn("This is a test" , outputs[0]["generated_text"])
_UpperCamelCase = pipeline(task="text-generation" , model=lowercase_ , tokenizer=lowercase_ , return_full_text=lowercase_)
_UpperCamelCase = text_generator("This is a test")
self.assertEqual(lowercase_ , [{"generated_text": ANY(lowercase_)}])
self.assertNotIn("This is a test" , outputs[0]["generated_text"])
_UpperCamelCase = text_generator("This is a test" , return_full_text=lowercase_)
self.assertEqual(lowercase_ , [{"generated_text": ANY(lowercase_)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
_UpperCamelCase = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=lowercase_)
self.assertEqual(
lowercase_ , [
[{"generated_text": ANY(lowercase_)}, {"generated_text": ANY(lowercase_)}],
[{"generated_text": ANY(lowercase_)}, {"generated_text": ANY(lowercase_)}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase_)
self.assertEqual(
lowercase_ , [
[{"generated_text": ANY(lowercase_)}, {"generated_text": ANY(lowercase_)}],
[{"generated_text": ANY(lowercase_)}, {"generated_text": ANY(lowercase_)}],
] , )
with self.assertRaises(lowercase_):
_UpperCamelCase = text_generator("test" , return_full_text=lowercase_ , return_text=lowercase_)
with self.assertRaises(lowercase_):
_UpperCamelCase = text_generator("test" , return_full_text=lowercase_ , return_tensors=lowercase_)
with self.assertRaises(lowercase_):
_UpperCamelCase = text_generator("test" , return_text=lowercase_ , return_tensors=lowercase_)
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator("")
self.assertEqual(lowercase_ , [{"generated_text": ANY(lowercase_)}])
else:
with self.assertRaises((ValueError, AssertionError)):
_UpperCamelCase = text_generator("")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator("This is a test" * 500 , max_new_tokens=20)
_UpperCamelCase = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20)
# Hole strategy cannot work
with self.assertRaises(lowercase_):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16)
_UpperCamelCase = pipe("This is a test")
self.assertEqual(
lowercase_ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa)
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16)
_UpperCamelCase = pipe("This is a test")
self.assertEqual(
lowercase_ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto")
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32)
_UpperCamelCase = pipe("This is a test")
self.assertEqual(
lowercase_ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
import torch
_UpperCamelCase = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa)
pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
def __UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
import torch
_UpperCamelCase = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa)
pipe("This is a test" , do_sample=lowercase_ , top_p=0.5)
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = "Hello world"
_UpperCamelCase = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2")
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger("transformers.generation.tf_utils")
else:
_UpperCamelCase = logging.get_logger("transformers.generation.utils")
_UpperCamelCase = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowercase_) as cl:
_UpperCamelCase = text_generator(lowercase_ , max_length=10 , max_new_tokens=1)
self.assertIn(lowercase_ , cl.out)
# The user only sets one -> no warning
with CaptureLogger(lowercase_) as cl:
_UpperCamelCase = text_generator(lowercase_ , max_new_tokens=1)
self.assertNotIn(lowercase_ , cl.out)
with CaptureLogger(lowercase_) as cl:
_UpperCamelCase = text_generator(lowercase_ , max_length=10)
self.assertNotIn(lowercase_ , cl.out)
| 547
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p^q * q^p <= base^degree, compared in log2 space."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
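# Quick sanity check (inputs chosen for illustration, not part of the original problem):
# with base = degree = 2 the bound is 2 * log2(2) = 2, so no prime pair qualifies.
# >>> solution(2, 2)
# 0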
if __name__ == "__main__":
print(F"{solution() = }")
| 547
| 1
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
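# Note: the scan above assumes `nums` is sorted in ascending order; for example,
# two_pointer([2, 7, 11, 15], 9) returns [0, 1] because 2 + 7 == 9.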
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 709
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
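# The *_results.json files written by the example scripts are flat metric dicts,
# e.g. {"eval_accuracy": 0.75, "eval_loss": 0.6} (illustrative values only).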
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.get_auto_remove_tmp_dir()
__magic_name__ :Optional[Any] = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
run_flax_glue.main()
__magic_name__ :int = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
    def test_run_clm(self):
"""simple docstring"""
__magic_name__ :List[str] = self.get_auto_remove_tmp_dir()
__magic_name__ :Tuple = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
run_clm_flax.main()
__magic_name__ :int = get_results(__lowerCAmelCase )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
    def test_run_summarization(self):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.get_auto_remove_tmp_dir()
__magic_name__ :List[str] = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
run_summarization_flax.main()
__magic_name__ :int = get_results(__lowerCAmelCase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
    def test_run_mlm(self):
"""simple docstring"""
__magic_name__ :Dict = self.get_auto_remove_tmp_dir()
__magic_name__ :Optional[Any] = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
run_mlm_flax.main()
__magic_name__ :int = get_results(__lowerCAmelCase )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
    def test_run_t5_mlm(self):
"""simple docstring"""
__magic_name__ :Tuple = self.get_auto_remove_tmp_dir()
__magic_name__ :List[Any] = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
            run_t5_mlm_flax.main()
__magic_name__ :Dict = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
    def test_run_ner(self):
"""simple docstring"""
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__magic_name__ :int = 7 if get_gpu_count() > 1 else 2
__magic_name__ :str = self.get_auto_remove_tmp_dir()
__magic_name__ :str = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
run_flax_ner.main()
__magic_name__ :Any = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
    def test_run_qa(self):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.get_auto_remove_tmp_dir()
__magic_name__ :int = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
run_qa.main()
__magic_name__ :int = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 180
| 0
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
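# Typical invocation (script and argument names are illustrative):
#   python xla_spawn.py --num_cores=8 YOUR_TRAINING_SCRIPT.py --arg1 --arg2 ...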
| 689
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
def __len__( self ):
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
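# Minimal usage sketch (assumes a local SQuAD-format data_dir; illustrative only):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = SquadDataset(SquadDataTrainingArguments(data_dir="./squad"), tokenizer, mode="train")
#   batch = dataset[0]  # dict of input_ids / attention_mask / token_type_ids tensors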
| 689
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 524_288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : int=[] , __UpperCAmelCase : Dict = None , **__UpperCAmelCase : List[str] , ) -> Dict:
SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> str:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = None
return state
def __setstate__( self : str , __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : int , __UpperCAmelCase : str = None ) -> Union[str, Any]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
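# Minimal usage sketch (downloads the remote spiece.model; illustrative only):
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(ids)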
| 711
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def A ( snake_case__ ):
'''simple docstring'''
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" , snake_case__ , )
if isinstance(snake_case__ , torch.Tensor ):
return image
elif isinstance(snake_case__ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = [image]
if isinstance(image[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image[0].size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE__ = np.concatenate(snake_case__ , axis=0 )
SCREAMING_SNAKE_CASE__ = np.array(snake_case__ ).astype(np.float32 ) / 2_55.0
SCREAMING_SNAKE_CASE__ = image.transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE__ = torch.from_numpy(snake_case__ )
elif isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = torch.cat(snake_case__ , dim=0 )
return image
def A ( snake_case__ ):
'''simple docstring'''
if isinstance(snake_case__ , torch.Tensor ):
return mask
elif isinstance(snake_case__ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = mask[0].size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE__ = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE__ = np.concatenate(snake_case__ , axis=0 )
SCREAMING_SNAKE_CASE__ = mask.astype(np.float32 ) / 2_55.0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = torch.from_numpy(snake_case__ )
elif isinstance(mask[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = torch.cat(snake_case__ , dim=0 )
return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet: UNet2DModel, scheduler: RePaintScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], num_inference_steps: int = 250, eta: float = 0.0, jump_length: int = 10, jump_n_sample: int = 10, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True) -> Union[ImagePipelineOutput, Tuple]:
SCREAMING_SNAKE_CASE__ = image
SCREAMING_SNAKE_CASE__ = _preprocess_image(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE__ = _preprocess_mask(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE__ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
SCREAMING_SNAKE_CASE__ = original_image.shape
SCREAMING_SNAKE_CASE__ = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.device )
SCREAMING_SNAKE_CASE__ = eta
SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE__ = generator[0] if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE__ = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE__ = self.scheduler.undo_step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = t
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
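# Minimal usage sketch (checkpoint id and inputs are illustrative):
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   result = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250).images[0]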
| 616
| 0
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
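# For reference, randomized quicksort performs about 2 * n * ln(n) comparisons in
# expectation, i.e. roughly 921 for the n = 100 elements sorted above.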
| 96
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
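# Worked example (hand-checked): for [[2, -2], [-4, 1]] the factorization is
#   lower = [[1, 0], [-2, 1]], upper = [[2, -2], [0, -3]],
# and lower @ upper reproduces the input matrix.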
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
@require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = Accelerator()
__lowerCamelCase : Any = (accelerator.state.process_index + 2, 10)
__lowerCamelCase : List[Any] = torch.randint(0, 10, shape).to(accelerator.device)
__lowerCamelCase : Optional[Any] = ''''''
__lowerCamelCase : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__lowerCamelCase : Optional[int] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__lowerCamelCase : Optional[int] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 700
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
"""simple docstring"""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.")
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
        function(*args)
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
        function(*args)
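# Minimal notebook usage sketch (function body is illustrative):
#   def train():
#       ...
#   notebook_launcher(train, args=(), num_processes=2)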
def debug_launcher(function, args=(), num_processes=2):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes",):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 656
| 0
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase_ = 1_6
lowerCamelCase_ = 3_2
def get_dataloaders(accelerator, batch_size=16):
lowerCAmelCase_ = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase_ = load_dataset('glue' , 'mrpc' )
def tokenize_function(a_ ):
# max_length=None => use the model max length (it's actually the default)
        lowerCAmelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ = datasets.map(
a_ , batched=a_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ = 8
else:
lowerCAmelCase_ = None
return tokenizer.pad(
a_ , padding='longest' , max_length=a_ , pad_to_multiple_of=a_ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase_ = DataLoader(
tokenized_datasets['train'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
lowerCAmelCase_ = DataLoader(
tokenized_datasets['validation'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCamelCase_ = mocked_dataloaders # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , a_ ) == "1":
lowerCAmelCase_ = 2
# Initialize accelerator
lowerCAmelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ = config['lr']
lowerCAmelCase_ = int(config['num_epochs'] )
lowerCAmelCase_ = int(config['seed'] )
lowerCAmelCase_ = int(config['batch_size'] )
lowerCAmelCase_ = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
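    # Under the hood the decorator catches CUDA out-of-memory errors, halves the batch
    # size, and re-runs the wrapped function until a batch size fits (raising if it hits 0).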
@find_executable_batch_size(starting_batch_size=a_ )
def inner_training_loop(a_ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(a_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ = AdamW(params=model.parameters() , lr=a_ )
lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(a_ , a_ )
# Instantiate scheduler
lowerCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=100 , num_training_steps=(len(a_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# Now we train the model
for epoch in range(a_ ):
model.train()
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase_ = model(**a_ )
lowerCAmelCase_ = outputs.loss
accelerator.backward(a_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
lowerCAmelCase_ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=a_ , references=a_ , )
lowerCAmelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , a_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
lowerCAmelCase_ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a_ , default=a_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(a_ , a_ )
if __name__ == "__main__":
main()
| 318
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
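# Example (illustrative): padding_tensor([[1, 2]], -1, "right", 4) -> [[1, 2, -1, -1]]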
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class a_ ( a_ ):
'''simple docstring'''
__a: PreTrainedTokenizerBase
__a: Union[bool, str, PaddingStrategy] = True
__a: Optional[int] = None
__a: Optional[int] = None
__a: int = -1_0_0
__a: str = "pt"
def _lowercase ( self , lowercase_ ) -> Dict:
'''simple docstring'''
import torch
lowerCAmelCase_ = 'label' if 'label' in features[0].keys() else 'labels'
lowerCAmelCase_ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowerCAmelCase_ = self.tokenizer.pad(
lowercase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
if labels is None:
return batch
lowerCAmelCase_ = torch.tensor(batch['entity_ids'] ).shape[1]
lowerCAmelCase_ = self.tokenizer.padding_side
if padding_side == "right":
lowerCAmelCase_ = [
list(lowercase_ ) + [self.label_pad_token_id] * (sequence_length - len(lowercase_ )) for label in labels
]
else:
lowerCAmelCase_ = [
[self.label_pad_token_id] * (sequence_length - len(lowercase_ )) + list(lowercase_ ) for label in labels
]
lowerCAmelCase_ = [feature['ner_tags'] for feature in features]
lowerCAmelCase_ = padding_tensor(lowercase_ , -1 , lowercase_ , lowercase_ )
lowerCAmelCase_ = [feature['original_entity_spans'] for feature in features]
lowerCAmelCase_ = padding_tensor(lowercase_ , (-1, -1) , lowercase_ , lowercase_ )
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
| 318
| 1
|
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
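# For reference, this is Project Euler problem 65; the digit sum of the numerator of
# the 100th convergent of e is 272.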
if __name__ == "__main__":
print(F"{solution() = }")
| 304
|
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 17_77, height: int = 18_55, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
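# Sanity check for the helper (hand-verified): _modexpt(2, 10, 1000) == 24,
# since 2**10 = 1024 and 1024 % 1000 == 24.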
if __name__ == "__main__":
print(F"{solution() = }")
| 304
| 1
|
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
# TEXT ENCODER
snake_case_ :Any = pipeline.text_encoder.config.max_position_embeddings
snake_case_ :Union[str, Any] = pipeline.text_encoder.config.hidden_size
snake_case_ :List[str] = pipeline.tokenizer(
"A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=_A, return_tensors="pt", )
onnx_export(
pipeline.text_encoder, model_args=(text_input.input_ids.to(device=_A, dtype=torch.intaa )), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
}, opset=_A, )
del pipeline.text_encoder
# UNET
snake_case_ :Union[str, Any] = pipeline.unet.config.in_channels
snake_case_ :Union[str, Any] = pipeline.unet.config.sample_size
snake_case_ :str = output_path / "unet" / "model.onnx"
onnx_export(
pipeline.unet, model_args=(
torch.randn(2, _A, _A, _A ).to(device=_A, dtype=_A ),
torch.randn(2 ).to(device=_A, dtype=_A ),
torch.randn(2, _A, _A ).to(device=_A, dtype=_A ),
False,
), output_path=_A, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"timestep": {0: "batch"},
"encoder_hidden_states": {0: "batch", 1: "sequence"},
}, opset=_A, use_external_data_format=_A, )
snake_case_ :Dict = str(unet_path.absolute().as_posix() )
snake_case_ :Dict = os.path.dirname(_A )
snake_case_ :str = onnx.load(_A )
# clean up existing tensor files
shutil.rmtree(_A )
os.mkdir(_A )
# collate external tensor files into one
onnx.save_model(
_A, _A, save_as_external_data=_A, all_tensors_to_one_file=_A, location="weights.pb", convert_attribute=_A, )
del pipeline.unet
# VAE ENCODER
    vae_encoder = pipeline.vae
snake_case_ :Optional[Any] = vae_encoder.config.in_channels
snake_case_ :Tuple = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
snake_case_ :List[Any] = lambda _A, _A : vae_encoder.encode(_A, _A )[0].sample()
onnx_export(
_A, model_args=(
torch.randn(1, _A, _A, _A ).to(device=_A, dtype=_A ),
False,
), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
}, opset=_A, )
# VAE DECODER
snake_case_ :List[Any] = pipeline.vae
snake_case_ :Optional[int] = vae_decoder.config.latent_channels
snake_case_ :Dict = vae_decoder.config.out_channels
# forward only through the decoder part
snake_case_ :Union[str, Any] = vae_encoder.decode
onnx_export(
_A, model_args=(
torch.randn(1, _A, _A, _A ).to(device=_A, dtype=_A ),
False,
), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
}, opset=_A, )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
snake_case_ :str = pipeline.safety_checker
snake_case_ :Any = safety_checker.config.vision_config.num_channels
snake_case_ :List[Any] = safety_checker.config.vision_config.image_size
snake_case_ :Union[str, Any] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker, model_args=(
torch.randn(
1, _A, _A, _A, ).to(device=_A, dtype=_A ),
torch.randn(1, _A, _A, _A ).to(device=_A, dtype=_A ),
), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
"clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
}, opset=_A, )
del pipeline.safety_checker
snake_case_ :Union[str, Any] = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" )
snake_case_ :List[Any] = pipeline.feature_extractor
else:
snake_case_ :Tuple = None
snake_case_ :Tuple = None
snake_case_ :Tuple = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ), scheduler=pipeline.scheduler, safety_checker=_A, feature_extractor=_A, requires_safety_checker=safety_checker is not None, )
onnx_pipeline.save_pretrained(_A )
print("ONNX pipeline saved to", _A )
del pipeline
del onnx_pipeline
snake_case_ :int = OnnxStableDiffusionPipeline.from_pretrained(_A, provider="CPUExecutionProvider" )
print("ONNX pipeline is loadable" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
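    # Illustrative invocation (the checkpoint id below is an assumption, not part
    # of this script):
    #   python convert_stable_diffusion_checkpoint_to_onnx.py \
    #       --model_path runwayml/stable-diffusion-v1-5 \
    #       --output_path ./stable_diffusion_onnx --opset 14 --fp16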
| 584
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings that differ in exactly one position into one
    string with '_' at that position; return False if they differ in more."""
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(lista)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge adjacent minterms; the strings that can no longer be
    merged are the prime implicants."""
    pi = []
    while True:
        checka = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    checka[i] = "*"
                    checka[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if checka[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to its binary string of length ``no_of_variable``."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly ``count`` positions."""
    lista = list(string1)
    listb = list(string2)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart, then cover
    any remaining minterms greedily."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff prime implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
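    # Illustrative run (a sketch): for 3 variables and minterms 0 1 2 5, merging
    # adjacent terms yields the prime implicants ['00_', '0_0', '_01'], and the
    # chart selection keeps '0_0' and '_01', each of which uniquely covers a minterm.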
| 584
| 1
|
"""simple docstring"""
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to travel on every day in ``days`` given 1-day, 7-day and
    30-day pass prices in ``costs`` (LeetCode 983)."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
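    # Illustrative check: mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
    # (a 7-day pass covering days 1-7, plus two 1-day tickets for days 8 and 20).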
| 720
|
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; record a cut when a
    subtree has an even number of nodes."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """HackerRank 'Even Tree': maximum number of edges that can be removed so
    that every remaining component has an even number of nodes."""
    dfs(1)
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9

    tree = defaultdict(list)

    visited: dict[int, bool] = {}
    cuts: list[int] = []

    count = 0

    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
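    # For the sample tree above this prints 2: cutting edges (1, 3) and (1, 6)
    # leaves components {3, 4}, {6, 8, 9, 10} and {1, 2, 5, 7}, all even-sized.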
| 500
| 0
|
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode ``data`` to Base64 without using the standard library helper."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode a Base64 string (or ASCII bytes) back to raw bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
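    # Round-trip sanity check: base64_encode(b"Hello") == b"SGVsbG8=" and
    # base64_decode("SGVsbG8=") == b"Hello", matching the standard library's
    # base64.b64encode / base64.b64decode for ASCII input.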
| 513
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
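# Note: both functions above are pytest hooks. pytest discovers them by name in
# conftest.py, so the exact names `pytest_addoption` and `pytest_terminal_summary`
# are required for them to run.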
| 513
| 1
|
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Project Euler Problem 71: return the numerator of the largest reduced proper
    fraction strictly below numerator/denominator with denominator <= limit.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
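    # With the defaults this prints 428570: 428570/999997 is the closest reduced
    # proper fraction below 3/7 with a denominator of at most 1,000,000.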
| 710
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32 )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels )
    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
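        # The expected slice pins the first 3x3 block of the final hidden state for
        # microsoft/layoutlmv3-base on the fixture image; atol=1e-4 absorbs small
        # numerical differences across TF builds and hardware.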
| 68
| 0
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
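# Illustrative parse: handle_test_results("1 failed, 42 passed in 0:01:23 ==")
# returns (1, 42, "0:01:23").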
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self):
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def failures(self):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
    @property
    def category_failures(self):
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 233
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Min-heap keyed by weight, with a position map for O(log n) update_key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
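# Usage sketch (hypothetical graph, not part of the original module):
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 3); g.add_edge(2, 3, 2); g.add_edge(1, 3, 10)
#   dist, parent = prims_algo(g)
#   # parent == {1: None, 2: 1, 3: 2}, i.e. the MST uses edges (1, 2) and (2, 3)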
| 233
| 1
|
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams of size ``ngram_size`` from ``sentence``."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
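    # Example: create_ngram("abcde", 3) -> ["abc", "bcd", "cde"]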
| 671
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__( self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing( self, slice_size: Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__( self, audio, sampling_rate=16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
'''simple docstring'''
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}." )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}." )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='''cpu''', dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents = latents.to(self.device )
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['''eta'''] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0, 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
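# The guidance step above is the standard classifier-free guidance update. A minimal,
# self-contained sketch of just that arithmetic; the tensors and the guidance_scale value
# below are illustrative assumptions, not values taken from this pipeline.
import torch

noise_pred_uncond = torch.randn(1, 4, 64, 64)  # placeholder unconditional prediction
noise_pred_text = torch.randn(1, 4, 64, 64)    # placeholder text-conditioned prediction
guidance_scale = 7.5                           # w in eps = eps_u + w * (eps_t - eps_u)

# with guidance_scale == 1.0 this collapses to the text-conditioned prediction alone
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)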
| 671
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = KandinskyInpaintPipeline
    params = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    batch_params = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention_forwardGenerator_pass = False
@property
def _A ( self :str ) -> str:
'''simple docstring'''
return 32
@property
def _A ( self :Any ) -> List[str]:
'''simple docstring'''
return 32
@property
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim
@property
def _A ( self :int ) -> Tuple:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _A ( self :int ) -> Tuple:
'''simple docstring'''
return 100
@property
def _A ( self :Union[str, Any] ) -> str:
'''simple docstring'''
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _A ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
return text_encoder
@property
def _A ( self :List[str] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNet2DConditionModel(**model_kwargs )
return model
@property
def _A ( self :str ) -> Any:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _A ( self :Optional[Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
def _A ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = "cpu"
snake_case_ : List[Any] = self.get_dummy_components()
snake_case_ : Dict = self.pipeline_class(**lowerCAmelCase__ )
snake_case_ : int = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Optional[int] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
snake_case_ : Any = output.images
snake_case_ : Optional[int] = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _A ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :str ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :int ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
snake_case_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        snake_case_ : Optional[Any] = np.ones((768, 768) , dtype=np.float32 )
snake_case_ : Optional[int] = 0
snake_case_ : Optional[int] = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
snake_case_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_, snake_case_ : int = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
        image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 653
|
'''simple docstring'''
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Extend the key by cycling its characters until it matches the message length."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the extended key; spaces pass through unchanged."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt by adding back the shift that encryption subtracted."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
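# Hedged usage sketch (values are assumptions, not captured output): decryption adds
# back exactly the shift that encryption subtracted, so the composition is the
# identity on letters, and spaces pass through untouched.
msg = "HELLO WORLD"
extended = generate_key(msg, "KEY")
assert original_text(cipher_text(msg, extended), extended) == msg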
| 653
| 1
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda comment : comment.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
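# A hedged hardening sketch (the DRY_RUN flag is an assumption, not part of the script
# above): gate the mutating call behind an environment variable so the commented print
# statements can be exercised without closing real issues.
import os

DRY_RUN = os.environ.get("DRY_RUN", "1") == "1"

def close_stale(issue):
    if DRY_RUN:
        print(f"Would close issue {issue.number}")
    else:
        issue.edit(state="closed")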
| 672
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nezha"""] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
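# For context, a minimal sketch of the lazy-import idea behind _LazyModule, using the
# module-level __getattr__ hook from PEP 562. This illustrates the mechanism only; it
# is not the transformers implementation, and the _LAZY mapping is an assumption.
import importlib

_LAZY = {"NezhaConfig": ".configuration_nezha"}  # attribute -> submodule (illustrative)

def __getattr__(name):
    if name in _LAZY:
        submodule = importlib.import_module(_LAZY[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")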
| 672
| 1
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: returns the list sorted in place."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
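# Quick usage sketch of the recursive bubble sort above:
print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]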
| 19
|
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError('''Invalid input''')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
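# The three-argument pow is what makes this tractable: modular exponentiation keeps
# every intermediate below the modulus instead of materializing 2**7830457, an integer
# of roughly 2.4 million digits. A small sketch of the same computation:
mod = 10**10
print((28433 * pow(2, 7830457, mod) + 1) % mod)  # last ten digits only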
| 482
| 0
|
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list:
    """Greedy change-making over denominations sorted in ascending order."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination as many times as it fits
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
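# Non-interactive usage sketch of the greedy routine above. Note the greedy strategy
# is only optimal for canonical coin systems like this one.
print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]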
| 28
|
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count set bits using Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
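# Worked example of why the loop runs once per set bit: each `number &= number - 1`
# clears the lowest set bit.
# 13 == 0b1101 -> 0b1100 -> 0b1000 -> 0b0000: three iterations, three set bits.
assert get_set_bits_count(13) == 3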
| 28
| 1
|
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__(self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        raise NotImplementedError('StoppingCriteria needs to be subclassed' )


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self , max_length: int , max_position_embeddings: Optional[int] = None ) -> None:
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__(self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
                'exceptions, performance degradation, or nothing at all.' )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self , start_length: int , max_new_tokens: int ) -> None:
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
            'with `max_length = start_length + max_new_tokens` instead.' , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__(self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self , max_time: float , initial_timestamp: Optional[float] = None ) -> None:
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__(self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__(self , input_ids: torch.LongTensor , scores: torch.FloatTensor , **kwargs ) -> bool:
        return any(criteria(input_ids , scores ) for criteria in self )

    @property
    def max_length(self ) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList , max_length: Optional[int] ) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
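# Hedged usage sketch of composing these criteria (the values are illustrative; a real
# decoding loop would pass its running input_ids and scores):
criteria = StoppingCriteriaList(
    [MaxLengthCriteria(max_length=50), MaxTimeCriteria(max_time=5.0)]
)
# inside a generation loop, stop as soon as any criterion fires:
# if criteria(input_ids, scores): break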
| 368
|
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
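# Non-interactive round-trip sketch: the cipher map is a bijection on A-Z, so
# deciphering inverts enciphering.
cm = create_cipher_map("SECRET")
assert decipher(encipher("HELLO", cm), cm) == "HELLO"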
| 675
| 0
|
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    '''Iterative Euclidean algorithm.'''
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    '''Recursive Euclidean algorithm.'''
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def _a( ):
'''simple docstring'''
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}" )
if __name__ == "__main__":
main()
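# Worked trace of the iterative version:
# euclidean_gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> gcd = 6
assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6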
| 665
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = """gpt_bigcode"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
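# Hedged usage sketch (the tiny sizes are illustrative): the attribute_map above lets
# the generic config names resolve to the GPT-2-style ones.
config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
print(config.hidden_size)        # 128, aliased to n_embd via attribute_map
print(config.num_hidden_layers)  # 2, aliased to n_layer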
| 665
| 1
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(f'Benchmark when {number = }:')
        print(f'{get_set_bits_count_using_modulo_operator(number) = }')
        timing = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=setup )
        print(f'timeit() runs in {timing} seconds')
        print(f'{get_set_bits_count_using_brian_kernighans_algorithm(number) = }')
        timing = timeit(
            'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=setup , )
        print(f'timeit() runs in {timing} seconds')

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
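# For reference, CPython exposes a native popcount the two functions above can be
# checked against (int.bit_count() requires Python 3.10+; bin(n).count("1") works on
# older versions).
n = 25  # 0b11001
assert get_set_bits_count_using_brian_kernighans_algorithm(n) == n.bit_count() == bin(n).count("1")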
| 84
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
    def __init__(self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
def _a (self ) -> int:
'''simple docstring'''
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def _a (self ) -> List[str]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = TFRegNetModel(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFRegNetForImageClassification(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _A ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : str =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : Dict =(
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Dict =False
SCREAMING_SNAKE_CASE_ : Union[str, Any] =False
SCREAMING_SNAKE_CASE_ : int =False
SCREAMING_SNAKE_CASE_ : List[Any] =False
SCREAMING_SNAKE_CASE_ : Optional[Any] =False
def _a (self ) -> List[str]:
'''simple docstring'''
        self.model_tester = TFRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _a (self ) -> int:
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _a (self ) -> Dict:
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _a (self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a (self ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _a (self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase__ = layer_type
UpperCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ):
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'''output_hidden_states''': True} )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'''output_hidden_states''': True} )
def _a (self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _a (self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = TFRegNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def _a (self ) -> Any:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' )
# forward pass
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
| 415
| 0
|
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCamelCase_ :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _lowercase( self ) -> Any:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Tuple = None
if self.use_input_mask:
UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : str = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Tuple:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Any = MegatronBertModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A , token_type_ids=A )
UpperCAmelCase : Union[str, Any] = model(A , token_type_ids=A )
UpperCAmelCase : Tuple = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Dict:
UpperCAmelCase : Dict = MegatronBertForMaskedLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A ) -> int:
UpperCAmelCase : List[Any] = MegatronBertForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : Dict = MegatronBertForNextSentencePrediction(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : List[Any] = MegatronBertForPreTraining(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , token_type_ids=A , labels=A , next_sentence_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowercase( self , A , A , A , A , A , A , A ) -> List[str]:
UpperCAmelCase : List[str] = MegatronBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : Dict = MegatronBertForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Optional[Any] = MegatronBertForTokenClassification(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Dict = self.num_choices
UpperCAmelCase : str = MegatronBertForMultipleChoice(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Any = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( lowercase_ , lowercase_ , unittest.TestCase ):
lowercase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
# test_resize_embeddings = False
lowercase = False
def _lowercase( self , A , A , A=False ) -> Dict:
UpperCAmelCase : Dict = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class in get_values(A ):
UpperCAmelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A )
UpperCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def _lowercase( self ) -> Union[str, Any]:
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
def _lowercase( self ) -> List[str]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Any:
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*A )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*A )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )


TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""" )
def _lowercase( self ) -> Any:
UpperCAmelCase : Dict = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
UpperCAmelCase : str = os.path.join(os.environ["""MYDIR"""] , A )
UpperCAmelCase : Any = MegatronBertModel.from_pretrained(A )
model.to(A )
model.half()
UpperCAmelCase : Union[str, Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCAmelCase : Any = model(A )[0]
UpperCAmelCase : List[str] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , A )
UpperCAmelCase : int = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
UpperCAmelCase : Optional[Any] = output[0, ii, jj]
UpperCAmelCase : Union[str, Any] = expected[3 * ii + jj]
UpperCAmelCase : Tuple = """ii={} jj={} a={} b={}""".format(A , A , A , A )
self.assertTrue(math.isclose(A , A , rel_tol=A , abs_tol=A ) , msg=A )
| 720
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __lowercase:
'''simple docstring'''
__a : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
__a : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
__a : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
__a : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
__a : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
__a : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
__a : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
__a : Optional[int] = field(
default=10000 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    __a : Optional[float] = field(default=2E-4 , metadata={'help': 'Learning rate for training.'} )
    __a : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
__a : Optional[int] = field(
default=750 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
__a : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
__a : Optional[bool] = field(
default=lowercase__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
__a : Optional[int] = field(default=50000 , metadata={'help': 'Maximum number of training steps.'} )
__a : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
__a : Optional[int] = field(default=1024 , metadata={'help': 'Sequence lengths used for training.'} )
__a : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
__a : Optional[int] = field(
default=1024 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
__a : Optional[str] = field(
default=lowercase__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
__a : Optional[bool] = field(default=lowercase__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class __lowercase:
'''simple docstring'''
__a : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
__a : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
__a : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
__a : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
__a : Optional[int] = field(default=1024 , metadata={'help': 'Length of sequences to be evaluated.'} )
__a : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class __lowercase:
'''simple docstring'''
__a : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
__a : Optional[int] = field(default=lowercase__ , metadata={'help': 'Number of workers used for code evaluation.'} )
__a : Optional[int] = field(
default=lowercase__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
__a : Optional[bool] = field(
default=lowercase__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
__a : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
__a : Optional[int] = field(default=256 , metadata={'help': 'Maximum number of newly generated tokens.'} )
__a : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
__a : Optional[float] = field(default=0.9_5 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
__a : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
__a : Optional[int] = field(
default=200 , metadata={'help': 'Number of completions to generate for each sample.'} )
__a : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
    __a : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'Name of the file in which to save the evaluation results.'} )
__a : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
__a : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class __lowercase:
'''simple docstring'''
__a : Optional[int] = field(
default=lowercase__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
__a : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
    __a : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
__a : Optional[int] = field(
default=100000 , metadata={'help': 'Number of files to save per JSON output file.'} )
__a : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
__a : Optional[float] = field(
default=1000 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
__a : Optional[float] = field(
default=100 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
__a : Optional[float] = field(
default=0.2_5 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
__a : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
__a : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
__a : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
__a : Optional[bool] = field(
default=lowercase__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
__a : Optional[float] = field(
default=0.8_5 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class __lowercase:
'''simple docstring'''
__a : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
__a : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
__a : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
__a : Optional[int] = field(default=200000 , metadata={'help': 'Number of examples to train tokenizer on.'} )
    __a : Optional[int] = field(
        default=32768 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
__a : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
__a : Optional[bool] = field(default=lowercase__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class __lowercase:
'''simple docstring'''
__a : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
__a : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
__a : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
__a : Optional[int] = field(default=lowercase__ , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class __lowercase:
'''simple docstring'''
__a : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
__a : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
__a : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
__a : Optional[bool] = field(default=lowercase__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
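

# --- Illustrative sketch (not from the original file): these dataclasses are
# consumed with transformers.HfArgumentParser. A minimal stand-in dataclass is
# used here because the class names above were mangled; all names in this
# sketch are hypothetical.
from transformers import HfArgumentParser


@dataclass
class _DemoArguments:
    learning_rate: Optional[float] = field(default=2E-4, metadata={'help': 'Learning rate for training.'})


if __name__ == '__main__':
    (_demo_args,) = HfArgumentParser(_DemoArguments).parse_args_into_dataclasses()
    print(_demo_args.learning_rate)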
| 594
|
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
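

# --- Illustrative check (not from the original file): the mask is 255 in the
# interior and ramps linearly to 0 across `overlap_pixels` at non-removed borders.
_m = make_transparency_mask((8, 8), 2)
assert _m.shape == (8, 8) and _m[0, 0] == 0 and _m[4, 4] == 255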
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
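

# --- Illustrative check (not from the original file; the function name above
# is restored from its role): `next_divisible` rounds n down to the nearest
# multiple of d, which keeps tile coordinates aligned below.
assert next_divisible(130, 32) == 128
assert next_divisible(128, 32) == 128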
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    '''simple docstring'''

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 594
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values):
        """simple docstring"""
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
        """simple docstring"""
        pass
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        """simple docstring"""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 709
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase__ ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
lowerCAmelCase__ ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
lowerCAmelCase__ ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        """simple docstring"""
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
| 690
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
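

# --- Illustrative check (not from the original file): `get_pairs` returns the
# set of adjacent symbol pairs that BPE ranks against the merge table.
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}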
class LEDTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        '''simple docstring'''
        return len(self.encoder)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ):
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
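

# --- Illustrative sketch (not from the original file): `_pad` above extends
# `global_attention_mask` with -1 so it stays aligned with padded `input_ids`
# (-1 means "local attention", not "masked out"). Hypothetical usage:
#
#   tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   enc = tok("long document ...", return_tensors=None)
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   padded = tok.pad(enc, padding="max_length", max_length=32)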
| 139
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
    'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
        'LxmertEncoder',
        'LxmertForPreTraining',
        'LxmertForQuestionAnswering',
        'LxmertModel',
        'LxmertPreTrainedModel',
        'LxmertVisualFeatureEncoder',
        'LxmertXLayer',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
        'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLxmertForPreTraining',
        'TFLxmertMainLayer',
        'TFLxmertModel',
        'TFLxmertPreTrainedModel',
        'TFLxmertVisualFeatureEncoder',
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
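
# --- Illustrative note (not from the original file): with the sys.modules
# replacement above, submodules listed in `_import_structure` are imported
# lazily, on first attribute access, e.g.:
#
#   import transformers.models.lxmert as lxmert
#   cfg = lxmert.LxmertConfig()   # modeling code is only imported if needed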
| 139
| 1
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text, n=100, character=" ") -> List[str]:
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
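

# --- Illustrative check (not from the original file): `split_text` chunks a
# document into n-word passages.
assert split_text("a b c d e", n=2) == ["a b", "c d", "e"]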
def split_documents(documents) -> dict:
    titles, texts = [], []
    for title, text in zip(documents['''title'''], documents['''text''']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '''''')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents, ctx_encoder, ctx_tokenizer) -> dict:
    input_ids = ctx_tokenizer(
        documents['''title'''], documents['''text'''], truncation=True, padding='''longest''', return_tensors='''pt''')['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info('''Step 1 - Create the dataset''')
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        '''csv''', data_files=[rag_example_args.csv_path], split='''train''', delimiter='''\t''', column_names=['''title''', '''text'''])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset''')
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info('''Step 2 - Index the dataset''')
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('''embeddings''', custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset_hnsw_index.faiss''')
    dataset.get_index('''embeddings''').save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    """simple docstring"""

    csv_path: str = field(
        default=str(Path(__file__).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv"""), metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""}, )
    question: Optional[str] = field(
        default=None, metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""}, )
    rag_model_name: str = field(
        default="""facebook/rag-sequence-nq""", metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""}, )
    dpr_ctx_encoder_model_name: str = field(
        default="""facebook/dpr-ctx_encoder-multiset-base""", metadata={
            """help""": (
                """The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
                """ 'facebook/dpr-ctx_encoder-multiset-base'"""
            )
        }, )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / """test_run""" / """dummy-kb"""), metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""}, )


@dataclass
class ProcessingArguments:
    """simple docstring"""

    num_proc: Optional[int] = field(
        default=None, metadata={
            """help""": """The number of processes to use to split the documents into passages. Default is single process."""
        }, )
    batch_size: int = field(
        default=16, metadata={
            """help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
        }, )


@dataclass
class IndexHnswArguments:
    """simple docstring"""

    d: int = field(
        default=768, metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""}, )
    m: int = field(
        default=128, metadata={
            """help""": (
                """The number of bi-directional links created for every new element during the HNSW index construction."""
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
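
        # --- Illustrative sketch (not from the original file): once saved, the
        # dataset and Faiss index can be reloaded and queried with DPR question
        # embeddings. Hypothetical follow-up (paths match the defaults above):
        #
        #   from datasets import load_from_disk
        #   ds = load_from_disk(passages_path)
        #   ds.load_faiss_index("embeddings", index_path)
        #   scores, examples = ds.get_nearest_examples("embeddings", question_embedding, k=5)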
| 715
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1024,
    'facebook/esm2_t12_35M_UR50D': 1024,
}
}
def load_vocab_file(vocab_file):
    with open(vocab_file, '''r''') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
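

# --- Illustrative check (not from the original file): `load_vocab_file` reads
# one token per line; token contents here are hypothetical.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as _f:
        _f.write("<cls>\n<pad>\nA\nC\n")
        _path = _f.name
    assert load_vocab_file(_path) == ["<cls>", "<pad>", "A", "C"]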
class EsmTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        """simple docstring"""
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        """simple docstring"""
        return len(self._id_to_token)

    def get_vocab(self):
        """simple docstring"""
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        """simple docstring"""
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        """simple docstring"""
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        """simple docstring"""
        vocab_file = os.path.join(save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''')
        with open(vocab_file, '''w''') as f:
            f.write('''\n'''.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        """simple docstring"""
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        """simple docstring"""
        return super()._add_tokens(new_tokens, special_tokens=True)
| 236
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    '''simple docstring'''

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''

    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps=2000, snr=0.15, sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5, correct_steps=1, ):
        """simple docstring"""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample, timestep=None):
        """simple docstring"""
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        """simple docstring"""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        """simple docstring"""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """simple docstring"""
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True, ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
def _snake_case ( self , __A , __A , __A = None , __A = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowerCamelCase : List[Any] = randn_tensor(sample.shape , layout=sample.layout , generator=_lowercase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCamelCase : List[Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowerCamelCase : Union[str, Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowerCamelCase : int = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCamelCase : Tuple = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCamelCase : Optional[int] = step_size.unsqueeze(-1 )
lowerCamelCase : str = sample + step_size * model_output
lowerCamelCase : str = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowercase )
def _snake_case ( self , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : str = timesteps.to(original_samples.device )
lowerCamelCase : Dict = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCamelCase : Union[str, Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_lowercase ) * sigmas[:, None, None, None]
)
lowerCamelCase : List[Any] = noise + original_samples
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
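# Example usage (a minimal sketch, not part of the original file): the usual
# predictor-corrector sampling loop pairs `step_correct` with `step_pred`.
# `score_model` and the sample shape below are hypothetical placeholders; the
# real model call signature depends on the pipeline wrapping this scheduler.
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = score_model(sample, t)
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = score_model(sample, t)
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample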
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)

        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__,  # the expected-encoding literal defined above
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_decode(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
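# Running this suite (an illustrative note, assuming a standard transformers
# checkout layout): `pytest tests/models/marian/test_tokenization_marian.py -v`.
# The @slow integration test only runs when RUN_SLOW=1 is set in the environment.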
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string consists of exactly four dot-separated numeric
    octets, each in the range 0-254 (note: this validator rejects 255)."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder and return the measurement counts for the
    carry-out and sum bits of input_1 + input_2 + carry_in.

    An input value of 2 puts the corresponding qubit into superposition.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")