| code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    """A node of a circular singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    """A circular singly linked list with head and tail pointers."""

    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise the CircularLinkedList operations end to end."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
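
# Usage sketch (the `_demo` helper below is an illustration, not part of the
# original module): build a small ring and check that traversal wraps around,
# i.e. the tail points back at the head.
def _demo() -> None:
    cll = CircularLinkedList()
    for value in (1, 2, 3):
        cll.insert_tail(value)
    assert str(cll) == "1->2->3"
    assert cll.tail.next is cll.head  # the ring closes back on itself


if __name__ == "__main__":
    _demo()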
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
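
# Usage sketch (assuming this is the `datasets.features` package __init__): a
# dataset schema mixes the exported feature types; the column names here are
# illustrative only.
#
#   schema = Features({
#       "audio": Audio(sampling_rate=16_000),
#       "label": ClassLabel(names=["negative", "positive"]),
#       "embedding": Array2D(shape=(1, 768), dtype="float32"),
#   })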
""" Project Euler Problem 63: https://projecteuler.net/problem=63 """


def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns the count of all n-digit positive integers which are also an nth power.

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
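
# Why the default search bounds suffice: any base >= 10 raised to the power n
# has more than n digits, and 9**22 already has only 21 digits, so every
# solution is covered by bases 1-9 and powers 1-21.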
""" Parse the raw DPR biencoder data into an evaluation set and a gold data file. """

import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
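
# Each raw DPR record is expected to look roughly like
#   {"question": "...", "positive_ctxs": [{"title": "...", "text": "..."}, ...]}
# so the script writes one question per line to the evaluation set and the
# tab-joined titles of the positive contexts to the matching gold line.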
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _lowercase ( __snake_case ,__snake_case=1 ) -> Optional[int]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def _lowercase ( __snake_case ,__snake_case=0 ) -> Optional[Any]:
__lowerCAmelCase : Union[str, Any] = []
for old_item in old_list:
__lowerCAmelCase : Dict = old_item.replace("in_layers.0" ,"norm1" )
__lowerCAmelCase : Optional[Any] = new_item.replace("in_layers.2" ,"conv1" )
__lowerCAmelCase : Optional[int] = new_item.replace("out_layers.0" ,"norm2" )
__lowerCAmelCase : Optional[Any] = new_item.replace("out_layers.3" ,"conv2" )
__lowerCAmelCase : Optional[Any] = new_item.replace("emb_layers.1" ,"time_emb_proj" )
__lowerCAmelCase : Any = new_item.replace("skip_connection" ,"conv_shortcut" )
__lowerCAmelCase : str = shave_segments(lowerCamelCase__ ,n_shave_prefix_segments=lowerCamelCase__ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def _lowercase ( __snake_case ,__snake_case=0 ) -> str:
__lowerCAmelCase : Dict = []
for old_item in old_list:
__lowerCAmelCase : Any = old_item
__lowerCAmelCase : Union[str, Any] = new_item.replace("norm.weight" ,"group_norm.weight" )
__lowerCAmelCase : Optional[int] = new_item.replace("norm.bias" ,"group_norm.bias" )
__lowerCAmelCase : Optional[int] = new_item.replace("proj_out.weight" ,"proj_attn.weight" )
__lowerCAmelCase : List[str] = new_item.replace("proj_out.bias" ,"proj_attn.bias" )
__lowerCAmelCase : str = shave_segments(lowerCamelCase__ ,n_shave_prefix_segments=lowerCamelCase__ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case=None ,__snake_case=None ,__snake_case=None ) -> Optional[int]:
assert isinstance(lowerCamelCase__ ,lowerCamelCase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__lowerCAmelCase : List[Any] = old_checkpoint[path]
__lowerCAmelCase : Dict = old_tensor.shape[0] // 3
__lowerCAmelCase : Optional[int] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__lowerCAmelCase : str = old_tensor.shape[0] // config["num_head_channels"] // 3
__lowerCAmelCase : Union[str, Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__lowerCAmelCase : Union[str, Any] = old_tensor.split(channels // num_heads ,dim=1 )
__lowerCAmelCase : str = query.reshape(lowerCamelCase__ )
__lowerCAmelCase : List[Any] = key.reshape(lowerCamelCase__ )
__lowerCAmelCase : str = value.reshape(lowerCamelCase__ )
for path in paths:
__lowerCAmelCase : List[str] = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__lowerCAmelCase : int = new_path.replace("middle_block.0" ,"mid_block.resnets.0" )
__lowerCAmelCase : int = new_path.replace("middle_block.1" ,"mid_block.attentions.0" )
__lowerCAmelCase : Any = new_path.replace("middle_block.2" ,"mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
__lowerCAmelCase : Union[str, Any] = new_path.replace(replacement["old"] ,replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__lowerCAmelCase : Optional[Any] = old_checkpoint[path["old"]][:, :, 0]
else:
__lowerCAmelCase : Any = old_checkpoint[path["old"]]
def _lowercase ( __snake_case ,__snake_case ) -> Tuple:
__lowerCAmelCase : Tuple = {}
__lowerCAmelCase : List[str] = checkpoint["time_embed.0.weight"]
__lowerCAmelCase : Optional[Any] = checkpoint["time_embed.0.bias"]
__lowerCAmelCase : Any = checkpoint["time_embed.2.weight"]
__lowerCAmelCase : Union[str, Any] = checkpoint["time_embed.2.bias"]
__lowerCAmelCase : int = checkpoint["input_blocks.0.0.weight"]
__lowerCAmelCase : Tuple = checkpoint["input_blocks.0.0.bias"]
__lowerCAmelCase : int = checkpoint["out.0.weight"]
__lowerCAmelCase : Optional[int] = checkpoint["out.0.bias"]
__lowerCAmelCase : Optional[int] = checkpoint["out.2.weight"]
__lowerCAmelCase : List[str] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
__lowerCAmelCase : Union[str, Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
__lowerCAmelCase : Dict = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(lowerCamelCase__ )
}
# Retrieves the keys for the middle blocks only
__lowerCAmelCase : Tuple = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
__lowerCAmelCase : List[Any] = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(lowerCamelCase__ )
}
# Retrieves the keys for the output blocks only
__lowerCAmelCase : Optional[Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
__lowerCAmelCase : Optional[int] = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(lowerCamelCase__ )
}
for i in range(1 ,lowerCamelCase__ ):
__lowerCAmelCase : List[Any] = (i - 1) // (config["num_res_blocks"] + 1)
__lowerCAmelCase : List[Any] = (i - 1) % (config["num_res_blocks"] + 1)
__lowerCAmelCase : Optional[int] = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
__lowerCAmelCase : Union[str, Any] = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
__lowerCAmelCase : Optional[int] = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
__lowerCAmelCase : int = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
__lowerCAmelCase : Optional[Any] = renew_resnet_paths(lowerCamelCase__ )
__lowerCAmelCase : List[Any] = {"old": F"""input_blocks.{i}.0""", "new": F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
__lowerCAmelCase : Tuple = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,additional_replacements=[meta_path, resnet_op] ,config=lowerCamelCase__ )
if len(lowerCamelCase__ ):
__lowerCAmelCase : List[Any] = renew_attention_paths(lowerCamelCase__ )
__lowerCAmelCase : Any = {
"old": F"""input_blocks.{i}.1""",
"new": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__lowerCAmelCase : Dict = {
F"""input_blocks.{i}.1.qkv.bias""": {
"key": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
"key": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,additional_replacements=[meta_path] ,attention_paths_to_split=lowerCamelCase__ ,config=lowerCamelCase__ ,)
__lowerCAmelCase : str = middle_blocks[0]
__lowerCAmelCase : str = middle_blocks[1]
__lowerCAmelCase : List[str] = middle_blocks[2]
__lowerCAmelCase : List[Any] = renew_resnet_paths(lowerCamelCase__ )
assign_to_checkpoint(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,config=lowerCamelCase__ )
__lowerCAmelCase : Tuple = renew_resnet_paths(lowerCamelCase__ )
assign_to_checkpoint(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,config=lowerCamelCase__ )
__lowerCAmelCase : List[str] = renew_attention_paths(lowerCamelCase__ )
__lowerCAmelCase : Optional[Any] = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,attention_paths_to_split=lowerCamelCase__ ,config=lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
__lowerCAmelCase : str = i // (config["num_res_blocks"] + 1)
__lowerCAmelCase : List[str] = i % (config["num_res_blocks"] + 1)
__lowerCAmelCase : Optional[int] = [shave_segments(lowerCamelCase__ ,2 ) for name in output_blocks[i]]
__lowerCAmelCase : Optional[int] = {}
for layer in output_block_layers:
__lowerCAmelCase : Optional[Any] = layer.split("." )[0], shave_segments(lowerCamelCase__ ,1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCamelCase__ )
else:
__lowerCAmelCase : List[str] = [layer_name]
if len(lowerCamelCase__ ) > 1:
__lowerCAmelCase : Tuple = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
__lowerCAmelCase : Tuple = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
__lowerCAmelCase : Union[str, Any] = renew_resnet_paths(lowerCamelCase__ )
__lowerCAmelCase : Optional[int] = renew_resnet_paths(lowerCamelCase__ )
__lowerCAmelCase : List[str] = {"old": F"""output_blocks.{i}.0""", "new": F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,additional_replacements=[meta_path] ,config=lowerCamelCase__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__lowerCAmelCase : Union[str, Any] = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
__lowerCAmelCase : Optional[int] = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
__lowerCAmelCase : Optional[int] = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowerCamelCase__ ) == 2:
__lowerCAmelCase : Optional[int] = []
if len(lowerCamelCase__ ):
__lowerCAmelCase : Any = renew_attention_paths(lowerCamelCase__ )
__lowerCAmelCase : Union[str, Any] = {
"old": F"""output_blocks.{i}.1""",
"new": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__lowerCAmelCase : str = {
F"""output_blocks.{i}.1.qkv.bias""": {
"key": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
"key": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None ,config=lowerCamelCase__ ,)
else:
__lowerCAmelCase : List[Any] = renew_resnet_paths(lowerCamelCase__ ,n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__lowerCAmelCase : Any = ".".join(["output_blocks", str(lowerCamelCase__ ), path["old"]] )
__lowerCAmelCase : Tuple = ".".join(["up_blocks", str(lowerCamelCase__ ), "resnets", str(lowerCamelCase__ ), path["new"]] )
__lowerCAmelCase : Dict = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__snake_case : List[Any] = parser.parse_args()
__snake_case : Dict = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__snake_case : Dict = json.loads(f.read())
__snake_case : Tuple = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__snake_case : int = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__snake_case : List[str] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__snake_case : List[Any] = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__snake_case : Optional[int] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
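
# Invocation sketch (the script name and paths are hypothetical):
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model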
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=2 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = parent
_SCREAMING_SNAKE_CASE : str = batch_size
_SCREAMING_SNAKE_CASE : List[str] = image_size
_SCREAMING_SNAKE_CASE : Optional[int] = patch_size
_SCREAMING_SNAKE_CASE : List[str] = num_channels
_SCREAMING_SNAKE_CASE : Dict = is_training
_SCREAMING_SNAKE_CASE : Optional[int] = use_labels
_SCREAMING_SNAKE_CASE : Tuple = hidden_size
_SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
_SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Any = initializer_range
_SCREAMING_SNAKE_CASE : Any = scope
_SCREAMING_SNAKE_CASE : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
_SCREAMING_SNAKE_CASE : Optional[Any] = (image_size // patch_size) ** 2
_SCREAMING_SNAKE_CASE : int = num_patches + 2
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = DeiTModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = DeiTForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_SCREAMING_SNAKE_CASE : Optional[Any] = 1
_SCREAMING_SNAKE_CASE : Dict = DeiTForMaskedImageModeling(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : Tuple = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.type_sequence_label_size
_SCREAMING_SNAKE_CASE : Tuple = DeiTForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_SCREAMING_SNAKE_CASE : Optional[Any] = 1
_SCREAMING_SNAKE_CASE : Optional[int] = DeiTForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : str = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) : int = config_and_inputs
_SCREAMING_SNAKE_CASE : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A__ = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = DeiTModelTester(self )
_SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__=False ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(snake_case__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
_SCREAMING_SNAKE_CASE : str = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = model(**snake_case__ ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
_SCREAMING_SNAKE_CASE : Any = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
_SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Tuple = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(snake_case__ ),
*get_values(snake_case__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
_SCREAMING_SNAKE_CASE : Optional[int] = problem_type["title"]
_SCREAMING_SNAKE_CASE : List[str] = problem_type["num_labels"]
_SCREAMING_SNAKE_CASE : str = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if problem_type["num_labels"] > 1:
_SCREAMING_SNAKE_CASE : Tuple = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
_SCREAMING_SNAKE_CASE : List[str] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=snake_case__ ) as warning_list:
_SCREAMING_SNAKE_CASE : str = model(**snake_case__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Optional[Any] = DeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _lowerCAmelCase ( ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
snake_case__ )
_SCREAMING_SNAKE_CASE : str = self.default_image_processor
_SCREAMING_SNAKE_CASE : str = prepare_img()
_SCREAMING_SNAKE_CASE : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case__ )
# verify the logits
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
_SCREAMING_SNAKE_CASE : Any = self.default_image_processor
_SCREAMING_SNAKE_CASE : Dict = prepare_img()
_SCREAMING_SNAKE_CASE : Dict = image_processor(images=snake_case__ , return_tensors="pt" )
_SCREAMING_SNAKE_CASE : str = inputs.pixel_values.to(snake_case__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case__ )
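
# To run just this suite from a transformers checkout, something like
#   python -m pytest tests/models/deit/test_modeling_deit.py
# should work; the exact file path is an assumption about the repository layout.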
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[Any]:
"""simple docstring"""
if isinstance(_lowerCAmelCase ,torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase ,PIL.Image.Image ):
_UpperCamelCase : List[Any] = [image]
if isinstance(image[0] ,PIL.Image.Image ):
_UpperCamelCase : Any = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_UpperCamelCase : List[str] = np.concatenate(_lowerCAmelCase ,axis=0 )
_UpperCamelCase : Tuple = np.array(_lowerCAmelCase ).astype(np.floataa ) / 255.0
_UpperCamelCase : Optional[Any] = image.transpose(0 ,3 ,1 ,2 )
_UpperCamelCase : List[str] = 2.0 * image - 1.0
_UpperCamelCase : List[Any] = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] ,torch.Tensor ):
_UpperCamelCase : List[Any] = torch.cat(_lowerCAmelCase ,dim=0 )
return image
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=0.9995 ) -> Any:
"""simple docstring"""
if not isinstance(_lowerCAmelCase ,np.ndarray ):
_UpperCamelCase : Tuple = True
_UpperCamelCase : Any = va.device
_UpperCamelCase : Tuple = va.cpu().numpy()
_UpperCamelCase : Tuple = va.cpu().numpy()
_UpperCamelCase : List[Any] = np.sum(va * va / (np.linalg.norm(_lowerCAmelCase ) * np.linalg.norm(_lowerCAmelCase )) )
if np.abs(_lowerCAmelCase ) > DOT_THRESHOLD:
_UpperCamelCase : List[str] = (1 - t) * va + t * va
else:
_UpperCamelCase : List[Any] = np.arccos(_lowerCAmelCase )
_UpperCamelCase : Tuple = np.sin(_lowerCAmelCase )
_UpperCamelCase : List[str] = theta_a * t
_UpperCamelCase : List[Any] = np.sin(_lowerCAmelCase )
_UpperCamelCase : List[Any] = np.sin(theta_a - theta_t ) / sin_theta_a
_UpperCamelCase : int = sin_theta_t / sin_theta_a
_UpperCamelCase : List[Any] = sa * va + sa * va
if inputs_are_torch:
_UpperCamelCase : int = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
return va
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = F.normalize(_lowerCAmelCase ,dim=-1 )
_UpperCamelCase : Union[str, Any] = F.normalize(_lowerCAmelCase ,dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
for param in model.parameters():
_UpperCamelCase : Tuple = value
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
def __init__( self : List[Any] , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __a : CLIPFeatureExtractor , __a : List[str]=None , __a : Dict=None , __a : int=None , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=__A , text_encoder=__A , clip_model=__A , tokenizer=__A , unet=__A , scheduler=__A , feature_extractor=__A , coca_model=__A , coca_tokenizer=__A , coca_transform=__A , )
_UpperCamelCase : Any = (
feature_extractor.size
if isinstance(feature_extractor.size , __A )
else feature_extractor.size["shortest_edge"]
)
_UpperCamelCase : str = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __A )
set_requires_grad(self.clip_model , __A )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Union[str, int]] = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__A )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
self.enable_attention_slicing(__A )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
set_requires_grad(self.vae , __A )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
set_requires_grad(self.vae , __A )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
set_requires_grad(self.unet , __A )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
set_requires_grad(self.unet , __A )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Tuple , __a : Tuple , __a : List[str] ) -> int:
# get the original timestep using init_timestep
_UpperCamelCase : Union[str, Any] = min(int(num_inference_steps * strength ) , __A )
_UpperCamelCase : int = max(num_inference_steps - init_timestep , 0 )
_UpperCamelCase : Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Optional[Any]=None ) -> str:
if not isinstance(__A , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(__A )}''' )
_UpperCamelCase : List[str] = image.to(device=__A , dtype=__A )
if isinstance(__A , __A ):
_UpperCamelCase : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__A )
]
_UpperCamelCase : List[str] = torch.cat(__A , dim=0 )
else:
_UpperCamelCase : Union[str, Any] = self.vae.encode(__A ).latent_dist.sample(__A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_UpperCamelCase : int = 0.1_82_15 * init_latents
_UpperCamelCase : int = init_latents.repeat_interleave(__A , dim=0 )
_UpperCamelCase : List[Any] = randn_tensor(init_latents.shape , generator=__A , device=__A , dtype=__A )
# get latents
_UpperCamelCase : Union[str, Any] = self.scheduler.add_noise(__A , __A , __A )
_UpperCamelCase : Union[str, Any] = init_latents
return latents
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : str = self.coca_transform(__A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_UpperCamelCase : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_UpperCamelCase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Tuple , __a : Tuple ) -> List[str]:
_UpperCamelCase : Optional[int] = self.feature_extractor.preprocess(__A )
_UpperCamelCase : Optional[Any] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
_UpperCamelCase : Dict = self.clip_model.get_image_features(__A )
_UpperCamelCase : Any = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__A )
_UpperCamelCase : List[str] = image_embeddings_clip.repeat_interleave(__A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __SCREAMING_SNAKE_CASE ( self : str , __a : List[str] , __a : Union[str, Any] , __a : List[Any] , __a : Dict , __a : List[Any] , __a : Union[str, Any] , __a : Union[str, Any] , ) -> Union[str, Any]:
_UpperCamelCase : Dict = latents.detach().requires_grad_()
_UpperCamelCase : Dict = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
_UpperCamelCase : int = self.unet(__A , __A , encoder_hidden_states=__A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_UpperCamelCase : List[str] = self.scheduler.alphas_cumprod[timestep]
_UpperCamelCase : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCamelCase : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_UpperCamelCase : str = torch.sqrt(__A )
_UpperCamelCase : List[Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __A ):
_UpperCamelCase : List[str] = self.scheduler.sigmas[index]
_UpperCamelCase : Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_UpperCamelCase : List[Any] = 1 / 0.1_82_15 * sample
_UpperCamelCase : Any = self.vae.decode(__A ).sample
_UpperCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCamelCase : str = transforms.Resize(self.feature_extractor_size )(__A )
_UpperCamelCase : Optional[int] = self.normalize(__A ).to(latents.dtype )
_UpperCamelCase : Any = self.clip_model.get_image_features(__A )
_UpperCamelCase : Union[str, Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__A )
_UpperCamelCase : Union[str, Any] = spherical_dist_loss(__A , __A ).mean() * clip_guidance_scale
_UpperCamelCase : str = -torch.autograd.grad(__A , __A )[0]
if isinstance(self.scheduler , __A ):
_UpperCamelCase : List[str] = latents.detach() + grads * (sigma**2)
_UpperCamelCase : Optional[Any] = noise_pred_original
else:
_UpperCamelCase : List[Any] = noise_pred_original - torch.sqrt(__A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional[int] = 512 , __a : Optional[int] = 512 , __a : float = 0.6 , __a : Optional[int] = 50 , __a : Optional[float] = 7.5 , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[float] = 100 , __a : Optional[torch.Generator] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : float = 0.8 , __a : float = 0.1 , __a : float = 0.1 , ) -> Optional[int]:
if isinstance(__A , __A ) and len(__A ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(__A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(__A , torch.Generator ) and batch_size > 1:
_UpperCamelCase : List[str] = [generator] + [None] * (batch_size - 1)
_UpperCamelCase : Dict = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
_UpperCamelCase : str = [x[0] for x in coca_is_none if x[1]]
_UpperCamelCase : List[Any] = ", ".join(__A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__A ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_UpperCamelCase : List[str] = self.get_image_description(__A )
if style_prompt is None:
if len(__A ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_UpperCamelCase : List[str] = self.get_image_description(__A )
# get prompt text embeddings for content and style
_UpperCamelCase : str = self.tokenizer(
__A , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__A , return_tensors="pt" , )
_UpperCamelCase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_UpperCamelCase : Optional[int] = self.tokenizer(
__A , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__A , return_tensors="pt" , )
_UpperCamelCase : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_UpperCamelCase : Optional[int] = slerp(__A , __A , __A )
# duplicate text embeddings for each generation per prompt
_UpperCamelCase : Optional[int] = text_embeddings.repeat_interleave(__A , dim=0 )
# set timesteps
_UpperCamelCase : List[str] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_UpperCamelCase : Union[str, Any] = {}
if accepts_offset:
_UpperCamelCase : Dict = 1
self.scheduler.set_timesteps(__A , **__A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_UpperCamelCase : Any = self.get_timesteps(__A , __A , self.device )
_UpperCamelCase : Optional[int] = timesteps[:1].repeat(__A )
# Preprocess image
_UpperCamelCase : int = preprocess(__A , __A , __A )
_UpperCamelCase : Optional[int] = self.prepare_latents(
__A , __A , __A , text_embeddings.dtype , self.device , __A )
_UpperCamelCase : Tuple = preprocess(__A , __A , __A )
_UpperCamelCase : Optional[int] = self.prepare_latents(
__A , __A , __A , text_embeddings.dtype , self.device , __A )
_UpperCamelCase : Tuple = slerp(__A , __A , __A )
if clip_guidance_scale > 0:
_UpperCamelCase : Dict = self.get_clip_image_embeddings(__A , __A )
_UpperCamelCase : Dict = self.get_clip_image_embeddings(__A , __A )
_UpperCamelCase : Any = slerp(
__A , __A , __A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCamelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase : Union[str, Any] = content_text_input.input_ids.shape[-1]
_UpperCamelCase : Tuple = self.tokenizer([""] , padding="max_length" , max_length=__A , return_tensors="pt" )
_UpperCamelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_UpperCamelCase : str = uncond_embeddings.repeat_interleave(__A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCamelCase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCamelCase : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_UpperCamelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_UpperCamelCase : List[Any] = torch.randn(__A , generator=__A , device="cpu" , dtype=__A ).to(
self.device )
else:
_UpperCamelCase : Any = torch.randn(__A , generator=__A , device=self.device , dtype=__A )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_UpperCamelCase : str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCamelCase : str = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCamelCase : Any = {}
if accepts_eta:
_UpperCamelCase : List[str] = eta
# check if the scheduler accepts generator
_UpperCamelCase : str = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_UpperCamelCase : str = generator
with self.progress_bar(total=__A ):
for i, t in enumerate(__A ):
# expand the latents if we are doing classifier free guidance
_UpperCamelCase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCamelCase : Union[str, Any] = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
_UpperCamelCase : Optional[Any] = self.unet(__A , __A , encoder_hidden_states=__A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase : Any = noise_pred.chunk(2 )
_UpperCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_UpperCamelCase : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_UpperCamelCase : Dict = self.cond_fn(
__A , __A , __A , __A , __A , __A , __A , )
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase : Any = self.scheduler.step(__A , __A , __A , **__A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_UpperCamelCase : List[Any] = 1 / 0.1_82_15 * latents
_UpperCamelCase : Dict = self.vae.decode(__A ).sample
_UpperCamelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCamelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase : List[str] = self.numpy_to_pil(__A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__A , nsfw_content_detected=__A )
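
# Loading sketch: as a community pipeline, this class would typically be pulled
# in via `DiffusionPipeline.from_pretrained(..., custom_pipeline=...)`. The
# checkpoint and CLIP model names below are assumptions for illustration.
#
#   clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
#   feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-large-patch14")
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#       clip_model=clip_model,
#       feature_extractor=feature_extractor,
#   )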
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
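
# Usage sketch: `column_mapping` tells the datasets library how to rename user
# columns to the canonical "text"/"summary" pair. For example, a template built
# with text_column="article" and summary_column="highlights" maps
# {"article": "text", "highlights": "summary"}.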
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
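
# Usage sketch (editor's addition): model test suites typically instantiate the
# helper with their config class plus whatever kwargs that config needs, then
# call the aggregate runner. BertConfig is only an illustration here.
#
# tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
# tester.run_common_tests()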
| 243
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
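
# Usage sketch (editor's addition; assumes a transformers release that ships
# the agents/tools API this class is built on):
#
# tool = TextClassificationTool()
# tool("This is a super nice API!", labels=["positive", "negative"])  # -> "positive"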
| 716
|
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
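
# Unit note (editor's addition): the formula v_rms = sqrt(3RT/M) expects the
# molar mass M in kg/mol. For N2 (0.028 kg/mol) at 300 K this yields roughly
# 517 m/s:
#
# >>> round(rms_speed_of_molecule(300, 0.028))  # doctest: +SKIP
# 517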
| 383
| 0
|
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k: Harris free parameter, empirically chosen in (0.04, 0.06)
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
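
# Background note (editor's addition): for each pixel the loop builds the
# structure tensor M = [[wxx, wxy], [wxy, wyy]] over the window and scores it
# with the Harris response R = det(M) - k * trace(M)**2; a large positive R
# indicates a corner, a large negative R an edge, and |R| near 0 a flat region.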
| 239
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
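

# Closed-form check (editor's sketch): sum(1..n) = n(n+1)/2 and
# sum of squares 1..n = n(n+1)(2n+1)/6, so the same answer comes out in O(1).
# For n = 100 both versions return 25164150.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares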
if __name__ == "__main__":
print(f'''{solution() = }''')
| 96
| 0
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 528
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__: Tuple = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
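
# Usage sketch (editor's addition; the values are illustrative):
#
# config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
# config.feature_size  # input_size * len(lags_sequence) + _number_of_features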
| 528
| 1
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 157
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 157
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 705
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 509
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Any = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 457
|
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
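
# Usage sketch (editor's addition; assumes a hub checkpoint whose components
# match the unet/scheduler pair registered above):
#
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]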
| 457
| 1
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 711
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
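
# Robustness note (editor's addition): IMDb changes its chart markup from time
# to time; when the "titleColumn"/"ratingColumn" cells disappear the scraper
# silently returns an empty dict, so a quick sanity check is worthwhile:
#
# movies = get_imdb_top_250_movies()
# assert movies, "no rows scraped - the CSS selectors are likely stale"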
| 473
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
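
# Usage sketch (editor's addition):
#
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("cats.png", threshold=0.9)
# # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]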
| 461
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
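
# Complexity note (editor's addition): with n = len(target), m words in the
# bank, and k the longest word, filling the table costs O(n * m * k) slice
# comparisons, plus the cost of copying every partial combination forward --
# the output itself can grow exponentially for highly ambiguous targets.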
| 461
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
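

# Minimal concrete command (editor's sketch; the class, flag, and wiring are
# illustrative, not part of the original file):
class HelloCommand(BaseCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--name", default="world")
        parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"hello, {self.name}")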
| 590
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 590
| 1
|
"""simple docstring"""
from ....utils import logging
UpperCAmelCase = logging.get_logger(__name__)
class MMBTConfig:
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 88
|
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int ):
    # Count the set bits (1s) in the binary representation of a positive integer.
    if not isinstance(lowerCamelCase_ , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    if lowerCamelCase_ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return bin(lowerCamelCase_ ).count('''1''' )
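# Example usage (illustrative): 25 == 0b11001, so three set bits are counted.
# _lowerCAmelCase(25)  # -> 3
# _lowerCAmelCase(36)  # -> 2 (0b100100)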
if __name__ == "__main__":
import doctest
doctest.testmod()
| 502
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self : Union[str, Any] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self : Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp( self : Union[str, Any] ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config( self : Union[str, Any] ):
        self.config_tester.run_common_tests()
    def test_model( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_causal_lm( self : List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def test_for_multiple_choice( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self : Optional[int] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self : Any ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self : Any ):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self : Tuple ):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50_000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12_053_341, -1.0_264_901, 0.29_221_946],
                    [-1.5_133_783, 0.197_433, 0.15_190_607],
                    [-5.0_135_403, -3.900_256, -0.84_038_764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
'''simple docstring'''
    tolerance = 1e-4
    def test_basic( self : Dict ):
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb1 = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
        tf.debugging.assert_near(emb1 , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self : List[str] ):
        desired_weights = tf.constant(
            [
                [0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
                [0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
                [0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
'''simple docstring'''
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings( self : Union[str, Any] ):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query_layer = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
        expected_key_layer = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key_layer , atol=self.tolerance )
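        # Informal note: rotary embeddings rotate each (even, odd) feature pair of q/k by a
        # position-dependent angle (roughly q' = q * cos + rotate_half(q) * sin). Rotation is
        # linear, so with key inputs equal to the negated query inputs above, the expected key
        # values are exactly the negated expected query values.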
| 705
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main( ):
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
    main()
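# Example invocation (flags are illustrative; see TensorFlowBenchmarkArguments for the full set):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128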
| 488
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowerCAmelCase : List[str] = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''ernie_m'''
    attribute_map = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size = 250_002 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3_072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 514 , initializer_range = 0.02 , pad_token_id = 1 , layer_norm_eps = 1E-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> Any:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
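# Minimal usage sketch (values are the defaults set above):
# config = ErnieMConfig()
# assert config.hidden_size == 768 and config.num_attention_heads == 12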
| 58
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = """cuda""" if torch.cuda.is_available() else """cpu"""
def generate_summaries_or_translations( examples: List[str], out_file: str, model_name: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs, ) -> Dict:
    """simple docstring"""
    fout = Path(out_file ).open('w', encoding='utf-8' )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task )
    if prefix is None:
        prefix = prefix or getattr(model.config, 'prefix', '' ) or ''
    for examples_chunk in tqdm(list(chunks(examples, batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors='pt', truncation=True, padding='longest' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs, )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '\n' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4 )}
def datetime_now( ) -> str:
    """simple docstring"""
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def run_generate( verbose=True ) -> Dict:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('input_path', type=str, help='like cnn_dm/test.source' )
    parser.add_argument('save_path', type=str, help='where to save summaries' )
    parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target' )
    parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics' )
    parser.add_argument('--device', type=str, required=False, default=DEFAULT_DEVICE, help='cuda, cuda:1, cpu etc.' )
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the begininng of src examples' )
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics' )
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size' )
    parser.add_argument(
        '--n_obs', type=int, default=-1, required=False, help='How many observations. Defaults to all.' )
    parser.add_argument('--fp16', action='store_true' )
    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results' )
    parser.add_argument(
        '--info', nargs='?', type=str, const=datetime_now(), help=(
            'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
            ' lang=en-ru. If no value is passed, the current datetime string will be used.'
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu' )
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns, reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, 'w' ) )
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 448
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
@lru_cache()
def bytes_to_unicode():
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
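# Example: the space byte (0x20) is outside the printable base ranges, so it is remapped to
# chr(256 + 32) == 'Ġ' -- the familiar 'Ġ' marker on word-initial tokens in GPT-2/BART vocabularies.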
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
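# Example: get_pairs(("l", "o", "w", "e", "r")) -> {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}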
class BartTokenizer (PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
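    # Illustrative only (actual splits depend on the ranks loaded from merges.txt):
    # with GPT-2-style English merges, self.bpe("lower") could return "low er".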
    def _tokenize( self , text ):
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
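    # Resulting formats (RoBERTa/BART convention):
    #   single sequence:    <s> A </s>
    #   pair of sequences:  <s> A </s></s> B </s>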
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        """simple docstring"""
        add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
| 708
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['model.decoder.embed_positions.weights']
def rename_keys( name ):
    if "emb" in name:
        name = name.replace("""emb""" , """model.decoder.embed_tokens""" )
    if "transformer" in name:
        name = name.replace("""transformer""" , """model.decoder""" )
    if "cross_attention" in name:
        name = name.replace("""cross_attention""" , """encoder_attn""" )
    if "linear1" in name:
        name = name.replace("""linear1""" , """fc1""" )
    if "linear2" in name:
        name = name.replace("""linear2""" , """fc2""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """self_attn_layer_norm""" )
    if "norm_cross" in name:
        name = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """final_layer_norm""" )
    if "out_norm" in name:
        name = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
    if "linears" in name:
        name = name.replace("""linears""" , """lm_heads""" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
    return name
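# Example: rename_keys("transformer.layers.0.linear1.weight") -> "model.decoder.layers.0.fc1.weight"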
def rename_state_dict( state_dict , hidden_size ) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("""in_proj_weight""" , """q_proj.weight""" )] = val[:hidden_size, :]
            state_dict[key.replace("""in_proj_weight""" , """k_proj.weight""" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("""in_proj_weight""" , """v_proj.weight""" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("""enc_to_dec_proj.""" ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
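# Note: a fused `in_proj_weight` of shape (3 * hidden_size, hidden_size) is split row-wise into
# equal q/k/v projection weights of shape (hidden_size, hidden_size) each.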
def decoder_config_from_checkpoint( checkpoint ) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
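# Example: decoder_config_from_checkpoint("medium") -> hidden_size=1536, ffn_dim=6144,
# num_hidden_layers=48, num_attention_heads=24.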
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained("""t5-base""" )
    audio_encoder = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2_048):
        raise ValueError("""Incorrect shape for logits""" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("""t5-base""" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 2
| 0
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""UserAgent""": UserAgent().random}
def _lowerCamelCase ( __lowerCamelCase ) -> dict:
'''simple docstring'''
UpperCAmelCase__ : Tuple = script.contents[0]
UpperCAmelCase__ : List[Any] = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
UpperCAmelCase__ : Any = f"https://www.instagram.com/{username}/"
UpperCAmelCase__ : Tuple = self.get_json()
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = requests.get(self.url , headers=_lowerCAmelCase ).text
UpperCAmelCase__ : Optional[Any] = BeautifulSoup(_lowerCAmelCase , """html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
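    # Note: the scripts[4] / scripts[3] indices depend on Instagram's page layout at scrape time
    # and may break when the site changes; the JSONDecodeError/KeyError fallback covers one variant.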
def __repr__( self ):
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self ):
return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username( self ) -> str:
        return self.user_data["username"]
    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]
    @property
    def biography( self ) -> str:
        return self.user_data["biography"]
    @property
    def email( self ) -> str:
        return self.user_data["business_email"]
    @property
    def website( self ) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]
def _lowerCamelCase ( __lowerCamelCase = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
UpperCAmelCase__ : Optional[Any] = InstagramUser(__lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Dict = InstagramUser("""github""")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 79
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def should_ignore( name , ignore_keys ):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
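# Example: should_ignore("decoder.model.1.lstm.weight_ih_l0", ["decoder.*"]) -> True,
# since a trailing ".*" makes the key a prefix match on "decoder.".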
def recursively_load_weights( orig_dict , hf_model , model_name ):
    '''simple docstring'''
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"""{name} was ignored""" )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split('''.*.''' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('''.''' )[-2]
                    mapped_key = mapped_key.replace('''*''' , layer_index )
if "weight_g" in name:
_A = '''weight_g'''
elif "weight_v" in name:
_A = '''weight_v'''
elif "weight_ih_l0" in name:
_A = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
_A = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
_A = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
_A = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
_A = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
_A = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
_A = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
_A = '''bias_hh_l1'''
elif "bias" in name:
_A = '''bias'''
elif "weight" in name:
_A = '''weight'''
elif "running_mean" in name:
_A = '''running_mean'''
elif "running_var" in name:
_A = '''running_var'''
elif "num_batches_tracked" in name:
_A = '''num_batches_tracked'''
else:
_A = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = '''time_group_norm'''
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"""Unknown model name: {model_name}""" )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['''best_state''']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
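# Example invocation (paths are illustrative, not taken from the original script):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf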
| 484
| 0
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock) -> None:
    """End-to-end smoke test for send_file with both socket and open patched out."""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
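# Note: stacked @patch decorators are applied bottom-up, so the mock for
# "builtins.open" is injected as the first argument (`file`) and the mock for
# "socket.socket" as the second (`sock`).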
| 719
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Any ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self :List[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = 1
snake_case_ : Dict = 3
snake_case_ : Union[str, Any] = (32, 32)
snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self :Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self :Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def _A ( self :Any ) -> str:
'''simple docstring'''
def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : str = torch.ones([0] )
def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : str = self.dummy_cond_unet
snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : str = 77
snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
snake_case_ : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : Dict = "A painting of a squirrel eating a burger"
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
snake_case_ : Any = output.images
snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
snake_case_ : Optional[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :int ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.dummy_cond_unet
snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
snake_case_ : int = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : int = 77
snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
snake_case_ : Optional[Any] = unet.half()
snake_case_ : Tuple = vae.half()
snake_case_ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self :Optional[int] ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ : str = init_image.resize((760, 504) )
snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : str = output.images[0]
snake_case_ : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
snake_case_ : List[Any] = init_image.resize((768, 512) )
snake_case_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
snake_case_ : Any = "BAAI/AltDiffusion"
snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 656
| 0
|
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters generated by the recurrence below, up to `max_perimeter`."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 341
|
'''simple docstring'''
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'from_type' value: {from_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'to_type' value: {to_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
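# Usage sketch (values illustrative):
#   length_conversion(4, "meter", "kilometer")   # -> 0.004
#   length_conversion(1, "gigametre", "meter")   # -> 1000000000.0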
| 199
| 0
|
'''simple docstring'''
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
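# Note: tree sort averages O(n log n), but the BST above is unbalanced, so an
# already-sorted input degrades insertion (and hence the whole sort) to O(n^2).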
| 216
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 216
| 1
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : Dict =get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class __a ( A__ , unittest.TestCase ):
_lowerCAmelCase : List[Any] = GPTSwaTokenizer
_lowerCAmelCase : str = False
_lowerCAmelCase : int = True
_lowerCAmelCase : Tuple = False
def __lowercase ( self : str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ : str = GPTSwaTokenizer(SCREAMING_SNAKE_CASE , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Dict = "This is a test"
UpperCamelCase__ : str = "This is a test"
return input_text, output_text
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : str = "<s>"
UpperCamelCase__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 20_00 )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : str = GPTSwaTokenizer(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
UpperCamelCase__ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
UpperCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
self.assertListEqual(
SCREAMING_SNAKE_CASE , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
UpperCamelCase__ : List[str] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Tuple = GPTSwaTokenizer(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = ["This is a test", "I was born in 92000, and this is falsé."]
UpperCamelCase__ : Optional[Any] = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertListEqual(tokenizer.encode_fast(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# Test that decode_fast returns the input text
for text, token_ids in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.decode_fast(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
UpperCamelCase__ : Union[str, Any] = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE , model_name="AI-Sweden/gpt-sw3-126m" , sequences=SCREAMING_SNAKE_CASE , )
| 228
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
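# Each edge is [node_u, node_v, weight]; Kruskal's algorithm sorts the edges by
# weight and keeps those whose endpoints lie in different union-find components,
# so `expected` above is the minimum spanning tree in order of acceptance.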
| 228
| 1
|
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowerCAmelCase : Optional[Any] = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowerCAmelCase : List[str] = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in `message`."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    """Sort-key helper: return the first element of a pair."""
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the letters of `message` ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score (0-12) how closely `message`'s letter frequencies match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
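# Usage sketch: rank candidate decryptions by how English they look
# (`candidates` is a hypothetical list of strings).
#   best = max(candidates, key=english_freq_match_score)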
| 630
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
lowerCAmelCase : Union[str, Any] = {
"""AI-Sweden/gpt-sw3-126m""": 20_48,
"""AI-Sweden/gpt-sw3-350m""": 20_48,
"""AI-Sweden/gpt-sw3-1.6b""": 20_48,
"""AI-Sweden/gpt-sw3-6.7b""": 20_48,
"""AI-Sweden/gpt-sw3-20b""": 20_48,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : List[Any] = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
_lowerCAmelCase : Any = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token
_lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token
_lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token
else:
_lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token
_lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Optional[int] = remove_space
_lowerCAmelCase : Any = keep_accents
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]')
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.__dict__.copy()
_lowerCAmelCase : int = None
return state
def __setstate__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCAmelCase : int = {}
_lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def a ( self ):
'''simple docstring'''
return len(self.sp_model )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ )
# Normalize whitespaces
_lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
_lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ )
return text
def a ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.preprocess_text(snake_case__ )
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return self.sp_model.PieceToId(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case__ )
@staticmethod
def a ( snake_case__ ):
'''simple docstring'''
return out_string
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = []
_lowerCAmelCase : Optional[Any] = ''
_lowerCAmelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : List[Any] = []
else:
current_sub_tokens.append(snake_case__ )
_lowerCAmelCase : List[Any] = False
out_string += self.sp_model.decode(snake_case__ )
return out_string
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : int = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , 'wb' ) as fi:
_lowerCAmelCase : Any = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def a ( self , snake_case__ , snake_case__ = False ):
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ )
_lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ )
else:
_lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text]
_lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ )
if return_tensors is True or return_tensors == "pt":
_lowerCAmelCase : int = torch.tensor(snake_case__ )
return token_ids
def a ( self , snake_case__ ):
'''simple docstring'''
return self.sp_model.decode(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCAmelCase : str = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:'
)
return self.encode(text=snake_case__ )
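# Sketch of the chat prompt the method above builds, assuming the default special
# tokens and the turns ["User: Hi", "Bot: Hello"]:
#   "<|endoftext|><s>User: Hi<s>Bot: Hello<s>Bot:"
# i.e. eos + bos-joined turns, ending with "Bot:" to cue the model's next reply.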
| 630
| 1
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , __A , __A=None , __A=None , __A=None , __A="resnet50" , __A=3 , __A=32 , __A=3 , __A=True , __A=True , ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : List[Any] =parent
SCREAMING_SNAKE_CASE_ : Union[str, Any] =out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE_ : List[str] =stage_names
SCREAMING_SNAKE_CASE_ : Any =out_features
SCREAMING_SNAKE_CASE_ : Tuple =backbone
SCREAMING_SNAKE_CASE_ : List[Any] =batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] =image_size
SCREAMING_SNAKE_CASE_ : Dict =num_channels
SCREAMING_SNAKE_CASE_ : Dict =use_pretrained_backbone
SCREAMING_SNAKE_CASE_ : str =is_training
def _snake_case ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Optional[int] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_config()
return config, pixel_values
def _snake_case ( self ) -> Optional[int]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _snake_case ( self , __A , __A ) -> List[str]:
SCREAMING_SNAKE_CASE_ : str =TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str =model(__A )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _snake_case ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ : int =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int =config_and_inputs
SCREAMING_SNAKE_CASE_ : str ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ ( A , A , A , unittest.TestCase ):
__lowerCamelCase = (TimmBackbone,) if is_torch_available() else ()
__lowerCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def _snake_case ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Optional[Any] =TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] =ConfigTester(self , config_class=__A , has_text_modality=__A )
def _snake_case ( self ) -> Any:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Optional[Any] ='''resnet18'''
SCREAMING_SNAKE_CASE_ : int ='''microsoft/resnet-18'''
SCREAMING_SNAKE_CASE_ : Any =AutoBackbone.from_pretrained(__A , use_timm_backbone=__A )
SCREAMING_SNAKE_CASE_ : List[str] =AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE_ : Any =AutoBackbone.from_pretrained(__A , use_timm_backbone=__A , out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE_ : Dict =AutoBackbone.from_pretrained(__A , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _snake_case ( self ) -> str:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _snake_case ( self ) -> Union[str, Any]:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _snake_case ( self ) -> int:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _snake_case ( self ) -> Any:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _snake_case ( self ) -> Tuple:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _snake_case ( self ) -> Tuple:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case ( self ) -> List[Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _snake_case ( self ) -> List[Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _snake_case ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case ( self ) -> str:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case ( self ) -> Optional[int]:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _snake_case ( self ) -> Tuple:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _snake_case ( self ) -> Tuple:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _snake_case ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self ) -> int:
pass
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[int] =model_class(__A )
SCREAMING_SNAKE_CASE_ : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Union[str, Any] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Optional[int] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def _snake_case ( self ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =True
SCREAMING_SNAKE_CASE_ : List[Any] =self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE_ : str =self.all_model_classes[0]
SCREAMING_SNAKE_CASE_ : List[str] =model_class(__A )
model.to(__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE_ : List[str] =model(**__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE_ : Optional[int] =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE_ : Tuple =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _snake_case ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : int =model_class(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple =model(**__A )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE_ : Optional[int] =copy.deepcopy(__A )
SCREAMING_SNAKE_CASE_ : List[Any] =None
SCREAMING_SNAKE_CASE_ : Tuple =model_class(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =model(**__A )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE_ : List[str] =copy.deepcopy(__A )
SCREAMING_SNAKE_CASE_ : Any =False
SCREAMING_SNAKE_CASE_ : Any =model_class(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any =model(**__A )
| 443
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , __A , __A=7 , __A=3 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=True , __A=1 / 255 , __A=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_ : Any =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
SCREAMING_SNAKE_CASE_ : Dict =parent
SCREAMING_SNAKE_CASE_ : Optional[Any] =batch_size
SCREAMING_SNAKE_CASE_ : List[Any] =num_channels
SCREAMING_SNAKE_CASE_ : Optional[int] =min_resolution
SCREAMING_SNAKE_CASE_ : str =max_resolution
SCREAMING_SNAKE_CASE_ : int =do_resize
SCREAMING_SNAKE_CASE_ : Optional[int] =size
SCREAMING_SNAKE_CASE_ : str =do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] =image_mean
SCREAMING_SNAKE_CASE_ : Any =image_std
SCREAMING_SNAKE_CASE_ : Optional[int] =do_rescale
SCREAMING_SNAKE_CASE_ : Union[str, Any] =rescale_factor
SCREAMING_SNAKE_CASE_ : str =do_pad
def _snake_case ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _snake_case ( self , __A , __A=False ) -> Any:
if not batched:
SCREAMING_SNAKE_CASE_ : str =image_inputs[0]
if isinstance(__A , Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] =image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str =image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ : List[Any] =int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE_ : List[Any] =self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE_ : Dict =int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE_ : str =self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE_ : str =self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE_ : Any =[]
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ : str =max(__A , key=lambda __A : item[0] )[0]
SCREAMING_SNAKE_CASE_ : List[Any] =max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase_ ( A , unittest.TestCase ):
__lowerCamelCase = YolosImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ : str =YolosImageProcessingTester(self )
@property
def _snake_case ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , '''image_mean''' ) )
self.assertTrue(hasattr(__A , '''image_std''' ) )
self.assertTrue(hasattr(__A , '''do_normalize''' ) )
self.assertTrue(hasattr(__A , '''do_resize''' ) )
self.assertTrue(hasattr(__A , '''size''' ) )
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : str =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , __A )
SCREAMING_SNAKE_CASE_ : Dict =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __A )
def _snake_case ( self ) -> List[str]:
pass
def _snake_case ( self ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : str =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[str] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] =self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] =self.image_processor_tester.get_expected_values(__A , batched=__A )
SCREAMING_SNAKE_CASE_ : str =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Dict =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] =self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ : Dict =image_processing(__A , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] =self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : str =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Any =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ : str =image_processing(__A , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] =self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[Any]:
# Initialize image_processings
SCREAMING_SNAKE_CASE_ : List[str] =self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE_ : str =self.image_processing_class(do_resize=__A , do_normalize=__A , do_rescale=__A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE_ : List[Any] =image_processing_a.pad(__A , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =image_processing_a(__A , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def _snake_case ( self ) -> Any:
# prepare image and target
SCREAMING_SNAKE_CASE_ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] =json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict ={'''image_id''': 39_769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE_ : Union[str, Any] =YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE_ : Dict =image_processing(images=__A , annotations=__A , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Dict =torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : Tuple =torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Dict =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : str =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : str =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
SCREAMING_SNAKE_CASE_ : Dict =torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
@slow
def _snake_case ( self ) -> Tuple:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] =json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Union[str, Any] ={'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
SCREAMING_SNAKE_CASE_ : Tuple =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE_ : Optional[Any] =YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE_ : Optional[int] =image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : List[Any] =torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Tuple =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
SCREAMING_SNAKE_CASE_ : List[Any] =torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Any =torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Tuple =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify masks
SCREAMING_SNAKE_CASE_ : List[Any] =822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __A )
# verify orig_size
SCREAMING_SNAKE_CASE_ : List[str] =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
SCREAMING_SNAKE_CASE_ : Any =torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
| 443
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BioGPT model."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42_384,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
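# --- Added usage sketch (not part of the original module) ---
# The config can be instantiated standalone and tweaked before model creation:
#
#     config = BioGptConfig(num_hidden_layers=6, hidden_size=256)
#     assert config.vocab_size == 42_384  # default
#     assert config.hidden_size == 256    # overridden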
'''simple docstring'''
def solution(limit: int = 100_0000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
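# --- Added sanity checks (not in the original file) ---
# 9 has the longest chain (20 terms) of any start below 14, and 837799 is the
# well-known Project Euler 14 answer for the one-million limit (chain of 525 terms):
#
#     assert solution(14) == 9
#     assert solution(1_000_000) == 837_799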
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Dynamically pads received inputs and samples the time-step mask indices."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2,
        )

        return batch
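# --- Added illustration (not part of the original script) ---
# What the collator's mask sampling returns, assuming transformers' private helper
# keeps this signature: a boolean array over (batch, feature frames), where True
# marks the time steps whose extracted features get masked during pretraining.
#
#     mask = _compute_mask_indices((2, 50), mask_prob=0.2, mask_length=2, min_masks=2)
#     print(mask.shape)  # (2, 50)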
class Wav2Vec2PreTrainer(Trainer):
    """Subclassed Trainer that decays the gumbel-softmax temperature after every update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )
def min_path_sum(grid: list) -> int:
    """Return the minimum-cost path sum from top-left to bottom-right, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
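# --- Added example (not in the original file) ---
# For the classic 3x3 grid the cheapest monotone path is 1 -> 3 -> 1 -> 1 -> 1 = 7:
#
#     >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#     7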
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set the given objects to None and empty the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if `exception` looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size whenever an OOM error is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
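# --- Added usage sketch (hypothetical training function, illustration only) ---
# The decorator injects `batch_size` as the first argument and halves it on every
# OOM-style failure, so the wrapped function is called WITHOUT that argument:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size, model, dataloader):
#         ...  # a CUDA OOM raised here triggers a retry with batch_size // 2
#
#     training_loop(model, dataloader)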
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
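# --- Added sketch of the idea behind `_LazyModule` (not the real implementation) ---
# The module object in `sys.modules` is swapped for a ModuleType subclass that
# resolves attributes on first access, so importing the package stays cheap:
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map "ElectraConfig" -> "configuration_electra", etc.
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#             return getattr(module, attr)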
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        )

        torch.manual_seed(0)
        prior = PriorTransformer(num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1)

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1)

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each frontier expands toward the other's current node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Print the titles of the current top stories from the BBC News API."""
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1) used by the forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
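# --- Added worked example (sample data chosen for illustration only) ---
# For f sampled as 1, 2, 4, 8 at x = 0, 1, 2, 3 the forward differences along the
# top row are all 1, and u = (1.5 - 0) / (1 - 0) = 1.5, so interpolating at 1.5:
#
#     f(1.5) ~= 1 + 1.5*1 + (1.5*0.5)/2! * 1 + (1.5*0.5*(-0.5))/3! * 1
#            =  1 + 1.5 + 0.375 - 0.0625 = 2.8125   (true value 2**1.5 ~ 2.83)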
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # This __init__ only exists to map deprecated `no_...` arguments onto
        # their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily pad log-mel spectrograms with the feature extractor
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(text_file), path_in_repo="data/text_data.txt", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_csv_with_dir_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_image_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
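# --- Added usage sketch (public example checkpoint; call shape assumed) ---
#
#     from transformers import ClapProcessor
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")
#     # -> tokenizer fields plus "input_features" from the feature extractor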
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
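# --- Added illustration (hypothetical file path; shows the API under test) ---
# With `datasets` imported (which registers these compression protocols), one
# archive is exposed as a single-file filesystem holding the decompressed payload:
#
#     fs = fsspec.filesystem("gzip", fo="path/to/dataset.jsonl.gz")
#     print(fs.glob("*"))  # ["dataset.jsonl"]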
"""simple docstring"""
from __future__ import annotations
def all_unique(elements) -> bool:
    """Return True if every item in `elements` appears only once."""
    return len(set(elements)) == len(elements)
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
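# --- Added example (not in the original file) ---
if __name__ == "__main__":
    print(sigmoid(np.array([-1.0, 1.0, 2.0])))              # [0.26894142 0.73105858 0.88079708]
    print(sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])))  # [-0.26894142  0.73105858  1.76159416]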
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive-search solution to the rod-cutting problem (exponential time)."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
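    # Hedged spot check with the classic CLRS price table [1, 5, 8, 9]:
    # the best cut of a rod of length 4 is two pieces of length 2, revenue 5 + 5.
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10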
| 658
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
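# Hedged note on the `_LazyModule` pattern above: the package import itself stays
# cheap; the torch/tf code paths are only imported when an exported attribute is
# first resolved, e.g.
#
#   from transformers.models.transfo_xl import TransfoXLConfig  # deferred import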
| 705
|
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the shortest repunit (1, 11, 111, ...) divisible by
    `divisor`, or 0 if none exists (i.e. `divisor` shares a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # R(k + 1) = 10 * R(k) + 1, computed modulo `divisor`
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least divisor whose least divisible repunit exceeds `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 548
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
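# Hedged usage sketch (the model checkpoint is illustrative, not prescribed by
# this file):
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Hi, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])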
| 300
| 1
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase = [False] * len(__UpperCamelCase )
lowercase = [-1] * len(__UpperCamelCase )
def dfs(lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple ):
lowercase = True
lowercase = c
for u in graph[v]:
if not visited[u]:
dfs(__UpperCamelCase , 1 - c )
for i in range(len(__UpperCamelCase ) ):
if not visited[i]:
dfs(__UpperCamelCase , 0 )
for i in range(len(__UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__lowerCAmelCase : Optional[Any] ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
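# Hedged counterexample: a triangle (odd cycle) cannot be 2-colored.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False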
| 721
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : Any ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
__lowerCAmelCase : Optional[int] ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
__lowerCAmelCase : Optional[int] ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 197
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
def __init__( self : str ):
'''simple docstring'''
# test for the above condition
self.test()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =0
lowercase : List[Any] =False
while not completed:
if counter == 1:
self.reset()
lowercase : Dict =self.advance()
if not self.does_advance(UpperCAmelCase__ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
lowercase , lowercase , lowercase : str =self.update(UpperCAmelCase__ )
counter += 1
if counter > 10000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False ):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
def __init__( self : int , UpperCAmelCase__ : List[int] ):
'''simple docstring'''
        super(PhrasalConstraint, self).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
lowercase : Union[str, Any] =token_ids
lowercase : Tuple =len(self.token_ids )
lowercase : Tuple =-1 # the index of the currently fulfilled step
lowercase : int =False
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowercase : Any =False
lowercase : Optional[int] =False
lowercase : int =False
if self.does_advance(UpperCAmelCase__ ):
self.fulfilled_idx += 1
lowercase : Any =True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase : List[str] =True
lowercase : List[Any] =completed
else:
# failed to make progress.
lowercase : Union[str, Any] =True
self.reset()
return stepped, completed, reset
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =False
lowercase : Tuple =0
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int=False ):
'''simple docstring'''
lowercase : Dict =PhrasalConstraint(self.token_ids )
if stateful:
lowercase : Union[str, Any] =self.seqlen
lowercase : Optional[Any] =self.fulfilled_idx
lowercase : List[str] =self.completed
return new_constraint
class DisjunctiveTrie:
def __init__( self : Any , UpperCAmelCase__ : List[List[int]] , UpperCAmelCase__ : Dict=True ):
'''simple docstring'''
lowercase : str =max([len(UpperCAmelCase__ ) for one in nested_token_ids] )
lowercase : Any ={}
for token_ids in nested_token_ids:
lowercase : Any =root
for tidx, token_id in enumerate(UpperCAmelCase__ ):
if token_id not in level:
lowercase : int ={}
lowercase : Union[str, Any] =level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F''' {nested_token_ids}.''' )
lowercase : Dict =root
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Any =self.trie
for current_token in current_seq:
lowercase : List[str] =start[current_token]
lowercase : Optional[Any] =list(start.keys() )
return next_tokens
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.next_tokens(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) == 0
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : Any =list(root.values() )
if len(UpperCAmelCase__ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase__ ) for nn in next_nodes] )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : List[str] =self.count_leaves(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) != leaf_count
class DisjunctiveConstraint(Constraint):
def __init__( self : List[str] , UpperCAmelCase__ : List[List[int]] ):
'''simple docstring'''
        super(DisjunctiveConstraint, self).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
lowercase : Any =DisjunctiveTrie(UpperCAmelCase__ )
lowercase : Tuple =nested_token_ids
lowercase : Dict =self.trie.max_height
lowercase : List[str] =[]
lowercase : str =False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Any =self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowercase : Union[str, Any] =self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowercase : Any =False
lowercase : Any =False
lowercase : Optional[Any] =False
if self.does_advance(UpperCAmelCase__ ):
self.current_seq.append(UpperCAmelCase__ )
lowercase : Dict =True
else:
lowercase : Tuple =True
self.reset()
lowercase : Union[str, Any] =self.trie.reached_leaf(self.current_seq )
lowercase : int =completed
return stepped, completed, reset
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =False
lowercase : Tuple =[]
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Any=False ):
'''simple docstring'''
lowercase : Dict =DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase : Union[str, Any] =self.seqlen
lowercase : int =self.current_seq
lowercase : Dict =self.completed
return new_constraint
class ConstraintListState:
def __init__( self : int , UpperCAmelCase__ : List[Constraint] ):
'''simple docstring'''
lowercase : List[str] =constraints
# max # of steps required to fulfill a given constraint
lowercase : str =max([c.seqlen for c in constraints] )
lowercase : Any =len(UpperCAmelCase__ )
lowercase : Union[str, Any] =False
self.init_state()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[str] =[]
lowercase : Optional[Any] =None
lowercase : List[Any] =[constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.constraints]
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =[]
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase : List[str] =constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
else:
lowercase : str =self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[List[int]] ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase , lowercase : Optional[Any] =self.add(UpperCAmelCase__ )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
lowercase , lowercase : List[Any] =False, False
if self.completed:
lowercase : Tuple =True
lowercase : Union[str, Any] =False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowercase , lowercase , lowercase : Union[str, Any] =self.inprogress_constraint.update(UpperCAmelCase__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) )
lowercase : List[str] =None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase : Optional[Any] =None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase : int =True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase__ ):
lowercase , lowercase , lowercase : List[Any] =pending_constraint.update(UpperCAmelCase__ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(UpperCAmelCase__ )
lowercase : Union[str, Any] =None
if not complete and stepped:
lowercase : Any =pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase : int =(
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase : List[Any] =True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Optional[int]=True ):
'''simple docstring'''
        new_state = ConstraintListState(self.constraints)  # we never mutate the self.constraints objects
        # throughout this process, so the copy starts from the initialization state.
if stateful:
lowercase : Tuple =[
constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase : Dict =self.inprogress_constraint.copy(stateful=UpperCAmelCase__ )
lowercase : int =[constraint.copy() for constraint in self.pending_constraints]
return new_state
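# Hedged usage sketch (method names follow the upstream `transformers` API; in
# this snippet they are still mangled, so treat the lines below as pseudocode):
#
#   constraint = PhrasalConstraint([5, 7, 9])      # force the token phrase 5 -> 7 -> 9
#   stepped, completed, reset = constraint.update(5)
#   assert stepped and not completed               # phrase partially fulfilled
#   constraint.update(7)
#   _, completed, _ = constraint.update(9)
#   assert completed                               # whole phrase emitted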
| 92
|
from __future__ import annotations

from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = walkable, 0 = blocked), unit edge costs."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
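    # Hedged demo: 1 = walkable cell, 0 = wall; the only route hugs the top edge,
    # giving distance 4.0 via [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)].
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    print(dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False))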
| 321
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
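# Hedged usage sketch (the model class is assumed from the usual transformers
# naming; this file only defines the configs):
#
#   from transformers import MobileNetV2Config, MobileNetV2Model
#   config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#   model = MobileNetV2Model(config)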
| 316
|
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 316
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 156
|
"""Validate credit card numbers: issuer prefix, length, and the Luhn checksum."""


def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a recognized issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return the overall verdict for a credit card number."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 11
| 0
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase_ = get_logger(__name__)
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ) -> Optional[Any]:
'''simple docstring'''
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with FSDP.state_dict_type(
__magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase : Any = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase : Optional[Any] = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
lowercase : Union[str, Any] = os.path.join(__magic_name__ , __magic_name__ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(__magic_name__ , __magic_name__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase : List[str] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase : Dict = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(__magic_name__ , __magic_name__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase : Union[str, Any] = os.path.join(__magic_name__ , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
logger.info(F"""Saving model to {ckpt_dir}""" )
lowercase : Optional[Any] = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=__magic_name__ , storage_writer=dist_cp.FileSystemWriter(__magic_name__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ) -> Optional[Any]:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__magic_name__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
lowercase : List[str] = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
lowercase : Union[str, Any] = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Loading model from {input_model_file}""" )
lowercase : Union[str, Any] = torch.load(__magic_name__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase : Tuple = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase : Any = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Loading model from {input_model_file}""" )
lowercase : Union[str, Any] = torch.load(__magic_name__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase : Optional[int] = (
os.path.join(__magic_name__ , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
lowercase : Optional[int] = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__magic_name__ , storage_reader=dist_cp.FileSystemReader(__magic_name__ ) , planner=DefaultLoadPlanner() , )
lowercase : Dict = state_dict['''model''']
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(__magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ) -> int:
'''simple docstring'''
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with FSDP.state_dict_type(
__magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase : Tuple = FSDP.optim_state_dict(__magic_name__ , __magic_name__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase : List[Any] = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase : Optional[int] = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(__magic_name__ , __magic_name__ )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
lowercase : Tuple = os.path.join(__magic_name__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(__magic_name__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ) -> str:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase : Tuple = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase : int = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase : Dict = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
lowercase : Optional[int] = torch.load(__magic_name__ )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
lowercase : str = (
os.path.join(__magic_name__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
lowercase : int = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(__magic_name__ ) , )
lowercase : Optional[int] = optim_state['''optimizer''']
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
lowercase : int = FSDP.optim_state_dict_to_load(__magic_name__ , __magic_name__ , __magic_name__ )
optimizer.load_state_dict(__magic_name__ )
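# Hedged context for the helpers above: inside `accelerate` they are driven by
# `Accelerator.save_state` / `Accelerator.load_state`, once per prepared model
# and optimizer (the checkpoint path below is illustrative):
#
#   from accelerate import Accelerator, FullyShardedDataParallelPlugin
#   accelerator = Accelerator(fsdp_plugin=FullyShardedDataParallelPlugin())
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state("checkpoint/")  # routes through the save helpers above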
| 596
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _A ( unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = MODEL_FOR_CAUSAL_LM_MAPPING
_UpperCamelCase : Optional[int] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[int] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowercase : List[Any] = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowercase : Any = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowercase : Dict = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
lowercase : Optional[Any] = text_generator.model.config.eos_token_id
lowercase : Any = '''<pad>'''
lowercase : str = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def __a ( self : Any ) -> Any:
"""simple docstring"""
lowercase : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowercase : int = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowercase : Union[str, Any] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __a ( self : Any , _A : Any , _A : Any , _A : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : int = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def __a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase : Optional[int] = '''Hello I believe in'''
lowercase : Any = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowercase : Optional[int] = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowercase : Tuple = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __a ( self : Any , _A : Any , _A : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase : int = text_generator.model
lowercase : int = text_generator.tokenizer
lowercase : str = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowercase : Any = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowercase : Optional[Any] = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
lowercase : Optional[Any] = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowercase : List[Any] = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowercase : int = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowercase : List[Any] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
lowercase : str = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
lowercase : Union[str, Any] = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
lowercase : Union[str, Any] = text_generator('''test''' , return_text=_A , return_tensors=_A )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowercase : List[Any] = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowercase : Dict = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowercase : str = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __a ( self : int ) -> List[str]:
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowercase : Tuple = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowercase : str = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        lowercase : Optional[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowercase : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowercase : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowercase : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
import torch
        lowercase : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
import torch
        lowercase : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def __a ( self : Dict ) -> str:
"""simple docstring"""
lowercase : str = '''Hello world'''
lowercase : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowercase : Dict = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowercase : int = logging.get_logger('''transformers.generation.utils''' )
        lowercase : str = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
lowercase : Optional[Any] = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
lowercase : str = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
lowercase : Optional[int] = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
| 596
| 1
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def lowercase ( ):
'''simple docstring'''
    n = 10
    features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n ) ),
        } , features=features , )
return dataset
@pytest.fixture(scope="session" )
def lowercase ( dataset , tmp_path_factory ):
    '''simple docstring'''
    filename = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
    dataset.map(cache_file_name=filename )
return filename
# FILE_CONTENT + files
FILE_CONTENT = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="session" )
def lowercase ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.txt"
    data = FILE_CONTENT
    with open(filename , "w" ) as f:
        f.write(data )
return filename
@pytest.fixture(scope="session" )
def lowercase ( tmp_path_factory ):
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
    data = bytes(FILE_CONTENT , "utf-8" )
    with bz2.open(path , "wb" ) as f:
        f.write(data )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
import gzip
SCREAMING_SNAKE_CASE_ :int = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
SCREAMING_SNAKE_CASE_ :Optional[Any] = bytes(a , "utf-8" )
with gzip.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
        data = bytes(FILE_CONTENT , "utf-8" )
        with lz4.frame.open(path , "wb" ) as f:
            f.write(data )
return path
@pytest.fixture(scope="session" )
def lowercase ( tmp_path_factory , text_file ):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with py7zr.SevenZipFile(path , "w" ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a ):
'''simple docstring'''
import tarfile
SCREAMING_SNAKE_CASE_ :List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(a , "w" ) as f:
f.add(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
import lzma
SCREAMING_SNAKE_CASE_ :List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
SCREAMING_SNAKE_CASE_ :List[str] = bytes(a , "utf-8" )
with lzma.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a ):
'''simple docstring'''
import zipfile
SCREAMING_SNAKE_CASE_ :Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE_ :List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
SCREAMING_SNAKE_CASE_ :List[str] = bytes(a , "utf-8" )
with zstd.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = tmp_path_factory.mktemp("data" ) / "file.xml"
SCREAMING_SNAKE_CASE_ :Tuple = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(a , "w" ) as f:
f.write(a )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session" )
def lowercase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = datasets.Dataset.from_dict(a )
SCREAMING_SNAKE_CASE_ :Any = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=a )
return path
@pytest.fixture(scope="session" )
def lowercase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
        con.commit()
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :int = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(a , "w" , newline="" ) as f:
SCREAMING_SNAKE_CASE_ :Dict = csv.DictWriter(a , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(a , "w" , newline="" ) as f:
SCREAMING_SNAKE_CASE_ :str = csv.DictWriter(a , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a ):
'''simple docstring'''
import bza
SCREAMING_SNAKE_CASE_ :Tuple = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(a , "rb" ) as f:
SCREAMING_SNAKE_CASE_ :Optional[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :str = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(a , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        } )
    with open(path , "wb" ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
return path
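# Hedged round-trip note (not part of the original fixtures): the written file
# can be checked with pq.read_table(path).to_pydict(), which should give back
# {"col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}.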
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
SCREAMING_SNAKE_CASE_ :Tuple = {"data": DATA}
with open(a , "w" ) as f:
json.dump(a , a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
SCREAMING_SNAKE_CASE_ :int = {"data": DATA_DICT_OF_LISTS}
with open(a , "w" ) as f:
json.dump(a , a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(a , "w" ) as f:
for item in DATA:
f.write(json.dumps(a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(a , "w" ) as f:
for item in DATA:
f.write(json.dumps(a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(a , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(a , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a ):
'''simple docstring'''
import gzip
SCREAMING_SNAKE_CASE_ :Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(a , "rb" ) as orig_file:
with gzip.open(a , "wb" ) as zipped_file:
zipped_file.writelines(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a ):
'''simple docstring'''
import gzip
SCREAMING_SNAKE_CASE_ :List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(a , "rb" ) as orig_file:
with gzip.open(a , "wb" ) as zipped_file:
zipped_file.writelines(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Optional[int] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.join("nested" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(a , "w" ) as f:
f.add(a , arcname=os.path.basename(a ) )
f.add(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(a , "w" ) as f:
f.add(a , arcname=os.path.join("nested" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Any = ["0", "1", "2", "3"]
SCREAMING_SNAKE_CASE_ :Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Any = ["0", "1", "2", "3"]
SCREAMING_SNAKE_CASE_ :str = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Any = ["0", "1", "2", "3"]
SCREAMING_SNAKE_CASE_ :int = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Dict = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename("unsupported.ext" ) )
f.write(a , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
SCREAMING_SNAKE_CASE_ :List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def lowercase ( ):
'''simple docstring'''
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def lowercase ( ):
'''simple docstring'''
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def lowercase ( a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def lowercase ( tmp_path_factory ):
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
| 631
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys ( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
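# Hedged shape illustration (not part of the original script): for hidden size
# H, the fused timm projection "attn.qkv.weight" has shape (3*H, H); rows
# [0:H] hold the query weights, rows [H:2*H] the key weights and rows
# [2*H:3*H] the value weights, which is exactly how the slices above are taken.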
def remove_classification_head_ ( state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head ( state_dict ):
    '''simple docstring'''
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
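# Hedged usage sketch (illustrative, not from the original script):
# rename_key(sd, "module.cls_token", "vit.embeddings.cls_token") moves the
# tensor stored under the first key to the second key, mutating sd in place.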
def convert_vit_msn_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ :Optional[Any] = 384
SCREAMING_SNAKE_CASE_ :Optional[int] = 1536
SCREAMING_SNAKE_CASE_ :Optional[int] = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ :str = 1024
SCREAMING_SNAKE_CASE_ :Optional[Any] = 4096
SCREAMING_SNAKE_CASE_ :Dict = 24
SCREAMING_SNAKE_CASE_ :Optional[Any] = 16
SCREAMING_SNAKE_CASE_ :Dict = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ :Dict = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE_ :List[Any] = 7
SCREAMING_SNAKE_CASE_ :List[Any] = 1024
SCREAMING_SNAKE_CASE_ :str = 4096
SCREAMING_SNAKE_CASE_ :Tuple = 24
SCREAMING_SNAKE_CASE_ :Dict = 16
SCREAMING_SNAKE_CASE_ :Optional[Any] = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ :Dict = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ :Dict = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ :Optional[int] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ :List[str] = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
SCREAMING_SNAKE_CASE_ :List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , a , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 631
| 1
|
'''simple docstring'''
import functools
def SCREAMING_SNAKE_CASE_ ( days : list[int] ,costs : list[int] ) -> int:
    # Validation
    if not isinstance(days ,list ) or not all(isinstance(day ,int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost ,int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 366:
        raise ValueError("""All days elements should be less than 366""" )
    days_set =set(days )
@functools.cache
    def dynamic_programming(index : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) ,costs[1] + dynamic_programming(index + 7 ) ,costs[2] + dynamic_programming(index + 30 ) ,)
return dynamic_programming(1 )
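# Hedged usage sketch (the example below is illustrative, not from the original
# file): for travel days [1, 4, 6, 7, 8, 20] and costs [2, 7, 15] for 1-day,
# 7-day and 30-day passes, the cheapest plan is a 1-day pass on day 1, a 7-day
# pass covering days 4-10 and a 1-day pass on day 20, so the function above
# returns 2 + 7 + 2 = 11.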
if __name__ == "__main__":
import doctest
doctest.testmod()
| 506
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
A__: Any = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A__: Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key ( state_dict ,old ,new ) -> Optional[Any]:
    val =state_dict.pop(old )
    state_dict[new] =val
def rename_backbone_keys ( state_dict ) -> List[str]:
    new_state_dict =OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key =key.replace("""backbone.0.body""" ,"""backbone.conv_encoder.model""" )
            new_state_dict[new_key] =value
        else:
            new_state_dict[key] =value
    return new_state_dict
def read_in_q_k_v ( state_dict ) -> int:
    prefix =""""""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] =in_proj_weight[:256, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] =in_proj_bias[:256]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] =in_proj_weight[256:512, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] =in_proj_bias[256:512]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] =in_proj_weight[-256:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] =in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"decoder.layers.{i}.self_attn.q_proj.weight"] =in_proj_weight[:256, :]
        state_dict[F"decoder.layers.{i}.self_attn.q_proj.bias"] =in_proj_bias[:256]
        state_dict[F"decoder.layers.{i}.self_attn.k_proj.weight"] =in_proj_weight[256:512, :]
        state_dict[F"decoder.layers.{i}.self_attn.k_proj.bias"] =in_proj_bias[256:512]
        state_dict[F"decoder.layers.{i}.self_attn.v_proj.weight"] =in_proj_weight[-256:, :]
        state_dict[F"decoder.layers.{i}.self_attn.v_proj.bias"] =in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn =state_dict.pop(
            F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
        in_proj_bias_cross_attn =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F"decoder.layers.{i}.encoder_attn.q_proj.weight"] =in_proj_weight_cross_attn[:256, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.q_proj.bias"] =in_proj_bias_cross_attn[:256]
        state_dict[F"decoder.layers.{i}.encoder_attn.k_proj.weight"] =in_proj_weight_cross_attn[256:512, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.k_proj.bias"] =in_proj_bias_cross_attn[256:512]
        state_dict[F"decoder.layers.{i}.encoder_attn.v_proj.weight"] =in_proj_weight_cross_attn[-256:, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.v_proj.bias"] =in_proj_bias_cross_attn[-256:]
def resize ( image ,checkpoint_url ) -> int:
    width , height =image.size
    current_max_size =max(width ,height )
    target_max_size =800 if """detection""" in checkpoint_url else 1000
    scale =target_max_size / current_max_size
    resized_image =image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
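# Hedged numeric example (not from the original script): a 2000x1500 detection
# image has current_max_size=2000 and target_max_size=800, giving scale=0.4 and
# a resized size of 800x600; the aspect ratio is preserved.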
def normalize ( image ) -> int:
    image =F.to_tensor(image )
    image =F.normalize(image ,mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] ,std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
    return image
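# Hedged note (not from the original script): F.normalize applies
# (pixel - mean) / std per channel with the standard ImageNet statistics
# hard-coded above, so a mid-gray value of 0.5 in the red channel maps to
# (0.5 - 0.485) / 0.229, roughly 0.066.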
@torch.no_grad()
def convert_table_transformer_checkpoint ( checkpoint_url ,pytorch_dump_folder_path ,push_to_hub ) -> Optional[int]:
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict =torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="""cpu""" )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    state_dict =rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix ="""model."""
    for key in state_dict.copy().keys():
        if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
            val =state_dict.pop(key )
            state_dict[prefix + key] =val
    # create HuggingFace model and load state dict
    config =TableTransformerConfig(
        backbone="""resnet18""" ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,ce_loss_coefficient=1 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.4 ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,)
    if "detection" in checkpoint_url:
        config.num_queries =15
        config.num_labels =2
        id2label ={0: """table""", 1: """table rotated"""}
        config.id2label =id2label
        config.label2id ={v: k for k, v in id2label.items()}
    else:
        config.num_queries =125
        config.num_labels =6
        id2label ={
            0: """table""",
            1: """table column""",
            2: """table row""",
            3: """table column header""",
            4: """table projected row header""",
            5: """table spanning cell""",
        }
        config.id2label =id2label
        config.label2id ={v: k for k, v in id2label.items()}
    image_processor =DetrImageProcessor(
        format="""coco_detection""" ,max_size=800 if """detection""" in checkpoint_url else 1000 )
    model =TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename ="""example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
    file_path =hf_hub_download(repo_id="""nielsr/example-pdf""" ,repo_type="""dataset""" ,filename=filename )
    image =Image.open(file_path ).convert("""RGB""" )
    pixel_values =normalize(resize(image ,checkpoint_url ) ).unsqueeze(0 )
    outputs =model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape =(1, 15, 3)
        expected_logits =torch.tensor(
            [[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
        expected_boxes =torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
    else:
        expected_shape =(1, 125, 7)
        expected_logits =torch.tensor(
            [[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
        expected_boxes =torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] ,expected_logits ,atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,expected_boxes ,atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info("""Pushing model to the hub...""" )
        model_name =(
            """microsoft/table-transformer-detection"""
            if """detection""" in checkpoint_url
            else """microsoft/table-transformer-structure-recognition"""
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
A__: int = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A__: Dict = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 506
| 1
|
def solution ( limit = 5_0_0_0_0_0_0_0 ):
    ret = set()
    prime_square_limit = int((limit - 2_4) ** (1 / 2) )
    primes = set(range(3, prime_square_limit + 1, 2 ) )
    primes.add(2 )
    for p in range(3, prime_square_limit + 1, 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p ) ) )
    for primea in primes:
        square = primea * primea
        for primeb in primes:
            cube = primeb * primeb * primeb
            if square + cube >= limit - 1_6:
                break
            for primec in primes:
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
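# Hedged sanity check (not in the original file): below 50 the only numbers
# expressible as p^2 + q^3 + r^4 with p, q, r prime are 28 = 2^2 + 2^3 + 2^4,
# 33 = 3^2 + 2^3 + 2^4, 47 = 2^2 + 3^3 + 2^4 and 49 = 5^2 + 2^3 + 2^4, so
# solution(50) should return 4.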
if __name__ == "__main__":
print(F"""{solution() = }""")
| 101
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure ={
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] =[
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
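# Hedged usage note (not part of the original module): with the _LazyModule in
# place, an attribute access on this subpackage such as Blip2Config only
# triggers the real import of configuration_blip_2 at that moment, so
# importing the package itself stays cheap even when torch is installed.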
| 101
| 1
|
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( SchedulerCommonTest ):
'''simple docstring'''
_A : Optional[int] = (KDPMaDiscreteScheduler,)
_A : Dict = 10
    def UpperCamelCase__ ( self : Optional[int] , **kwargs : List[Any] ):
        """simple docstring"""
        config : Any = {
            """num_train_timesteps""": 1_1_0_0,
            """beta_start""": 0.00_01,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
        scheduler_class : Any = self.scheduler_classes[0]
        scheduler_config : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler : Optional[int] = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model : Optional[int] = self.dummy_model()
        sample : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample : Union[str, Any] = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample : List[Any] = scheduler.scale_model_input(sample , t )
            model_output : Optional[Any] = model(sample , t )
            output : Dict = scheduler.step(model_output , t , sample )
            sample : List[str] = output.prev_sample
        result_sum : List[Any] = torch.sum(torch.abs(sample ) )
        result_mean : List[Any] = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2
assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2
assert abs(result_mean.item() - 0.00_02 ) < 1E-3
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
if torch_device == "mps":
return
        scheduler_class : List[Any] = self.scheduler_classes[0]
        scheduler_config : str = self.get_scheduler_config()
        scheduler : Optional[Any] = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model : Union[str, Any] = self.dummy_model()
        sample : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample : str = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample : Tuple = scheduler.scale_model_input(sample , t )
            model_output : Optional[Any] = model(sample , t )
            output : Any = scheduler.step(model_output , t , sample )
            sample : List[Any] = output.prev_sample
        result_sum : int = torch.sum(torch.abs(sample ) )
        result_mean : int = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
if torch_device == "mps":
return
        scheduler_class : Dict = self.scheduler_classes[0]
        scheduler_config : Dict = self.get_scheduler_config()
        scheduler : List[Any] = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model : List[Any] = self.dummy_model()
        sample : Optional[Any] = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample : Dict = scheduler.scale_model_input(sample , t )
            model_output : Any = model(sample , t )
            output : Union[str, Any] = scheduler.step(model_output , t , sample )
            sample : str = output.prev_sample
        result_sum : Dict = torch.sum(torch.abs(sample ) )
        result_mean : int = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
| 717
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
_A : Optional[int] = ['''onnx''']
def __init__( self : List[str] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(self , ["""onnx"""] )
@classmethod
def UpperCamelCase__ ( cls : Optional[int] , *lowerCAmelCase__ : int , **lowerCAmelCase__ : int ):
"""simple docstring"""
requires_backends(cls , ["""onnx"""] )
@classmethod
def UpperCamelCase__ ( cls : Tuple , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["""onnx"""] )
| 178
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
A_ = None
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
A_ = {
"google/rembert": 256,
}
A_ = "▁"
class UpperCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> Dict:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def UpperCamelCase( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
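    # Hedged illustration (not from the original file): for a single sequence the
    # method above produces [CLS] token_ids_a [SEP]; for a pair it produces
    # [CLS] token_ids_a [SEP] token_ids_b [SEP], the standard RemBERT input format.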
    def UpperCamelCase( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def UpperCamelCase( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def UpperCamelCase( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 42
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance ( x ,y ,max_step ) -> float:
    a = x
    b = y
    for step in range(max_step ): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
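# Hedged examples (not from the original file): get_distance(0, 0, 50) never
# diverges and returns 49 / 49 = 1.0, while get_distance(1, 1, 50) escapes on
# the first step (a*a + b*b = 10 > 4) and returns 0.0.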
def get_black_and_white_rgb ( distance ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def get_color_coded_rgb ( distance ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(distance ,1 ,1 ) )
def get_image ( image_width = 8_00 ,image_height = 6_00 ,figure_center_x = -0.6 ,figure_center_y = 0 ,figure_width = 3.2 ,max_step = 50 ,use_distance_color_coding = True ,) -> Image.Image:
    img = Image.new('RGB' ,(image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x ,figure_y ,max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 42
| 1
|
from manim import *
class _UpperCamelCase ( Scene ):
    """simple docstring"""
    def construct ( self ) -> Tuple:
'''simple docstring'''
__lowercase = Rectangle(height=0.5 , width=0.5 )
__lowercase = Rectangle(height=0.25 , width=0.25 )
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''CPU''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''GPU''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''Model''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
__lowercase = []
__lowercase = []
__lowercase = []
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
__lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 )
self.add(lowerCAmelCase__ )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''Loaded Checkpoint''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCAmelCase__ )
__lowercase = []
__lowercase = []
for i, rect in enumerate(lowerCAmelCase__ ):
__lowercase = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 )
target.move_to(lowerCAmelCase__ )
ckpt_arr.append(lowerCAmelCase__ )
__lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
__lowercase = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''Disk''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) )
__lowercase = []
for i, rect in enumerate(lowerCAmelCase__ ):
__lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(FadeOut(lowerCAmelCase__ ) )
__lowercase = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) )
self.play(
FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , )
self.wait()
| 522
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Return the smallest index i in (l, r] such that v[i] >= key (v sorted)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
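# e.g. ceil_index([1, 3, 5, 7], -1, 3, 4) == 2 — the first position holding a value >= 4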
def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
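# e.g. longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
#      (one longest increasing subsequence is 2, 5, 7, 8, 10, 13)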
if __name__ == "__main__":
import doctest
doctest.testmod()
| 522
| 1
|
def solution(n: int = 100) -> int:
    """Project Euler 29: count the distinct terms of a**b for 2 <= a, b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
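# e.g. solution(5) == 15 — the sequence 2**2, 2**3, ..., 5**5 has 15 distinct terms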
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 100
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]')
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]')
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512)
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128)
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=['article', 'highlights'], )
        train_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size,
            remove_columns=['article', 'highlights'], )
        val_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size,
            predict_with_generate=True, evaluation_strategy='steps', do_train=True, do_eval=True,
            warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics,
            train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
| 302
| 0
|
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
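    # The fast tests below pin exact top-k outputs of a tiny random checkpoint
    # (sshleifer/tiny-distilroberta-base); the @slow tests exercise the real
    # distilroberta-base model, and the run_* helpers at the bottom of the class
    # cover targets, top_k, duplicate targets and multi-mask inputs.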
@require_tf
def _snake_case ( self ) -> str:
_lowerCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_lowerCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1E-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1E-05, "token": 25506, "token_str": " accuser"},
] , )
_lowerCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1E-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1E-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_lowerCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2E-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2E-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9E-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_lowerCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2E-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2E-05, "token": 16416, "token_str": "ELS"},
] , )
_lowerCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2E-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2E-05, "token": 16416, "token_str": "ELS"},
] , )
_lowerCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1E-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2E-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2E-05, "token": 13606, "token_str": " Clara"},
] , )
_lowerCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=6 ) , [
[
{
"score": 2.2E-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2E-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2E-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2E-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        outputs = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
_lowerCAmelCase = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowerCAmelCase = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowerCAmelCase = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_lowerCAmelCase , [
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
] , )
with self.assertRaises(_lowerCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_lowerCAmelCase ):
fill_masker("This is" )
self.run_test_top_k(_lowerCAmelCase , _lowerCAmelCase )
self.run_test_targets(_lowerCAmelCase , _lowerCAmelCase )
self.run_test_top_k_targets(_lowerCAmelCase , _lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_lowerCAmelCase , _lowerCAmelCase )
self.fill_mask_with_multiple_masks(_lowerCAmelCase , _lowerCAmelCase )
    def run_test_targets(self, model, tokenizer):
_lowerCAmelCase = tokenizer.get_vocab()
_lowerCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_lowerCAmelCase = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , targets=_lowerCAmelCase )
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowerCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _lowerCAmelCase )
_lowerCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_lowerCAmelCase ) )
# Call argument
_lowerCAmelCase = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowerCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _lowerCAmelCase )
_lowerCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_lowerCAmelCase ) )
# Score equivalence
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCAmelCase )
_lowerCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_lowerCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowerCAmelCase ) == set(_lowerCAmelCase ):
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCAmelCase )
_lowerCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""] )
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="" )
    def run_test_top_k(self, model, tokenizer):
_lowerCAmelCase = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , top_k=2 )
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
_lowerCAmelCase = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
] , )
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
    def run_test_top_k_targets(self, model, tokenizer):
_lowerCAmelCase = tokenizer.get_vocab()
_lowerCAmelCase = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# top_k=2, ntargets=3
_lowerCAmelCase = sorted(vocab.keys() )[:3]
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_lowerCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_lowerCAmelCase = [el["token_str"] for el in sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x["score"] , reverse=_lowerCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowerCAmelCase ).issubset(_lowerCAmelCase ):
_lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowerCAmelCase ) , nested_simplify(_lowerCAmelCase ) )
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
_lowerCAmelCase = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_lowerCAmelCase = sorted(vocab.keys() )[:3]
_lowerCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_lowerCAmelCase = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=_lowerCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_lowerCAmelCase ) , 3 )
    def fill_mask_with_multiple_masks(self, model, tokenizer):
_lowerCAmelCase = FillMaskPipeline(model=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
_lowerCAmelCase = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
[
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
{"sequence": ANY(_lowerCAmelCase ), "score": ANY(_lowerCAmelCase ), "token": ANY(_lowerCAmelCase ), "token_str": ANY(_lowerCAmelCase )},
],
] , )
| 489
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
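# The defaults above reproduce a bert-large-style geometry: 1024 hidden units,
# 24 layers, 16 heads, e.g. config = BertGenerationConfig().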
| 489
| 1
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to ``max_images`` full-resolution images for ``query`` from Google Images."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }
    html = requests.get('https://www.google.com/search', params=params, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    matched_images_data = ''.join(
        re.findall(r'AF_initDataCallback\(([^<]+)\);', str(soup.select('script'))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",', matched_images_data_json, )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]', '',
        str(matched_google_image_data), )
    matched_google_full_resolution_images = re.findall(
        r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]', removed_matched_google_images_thumbnails, )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, 'ascii').decode(
            'unicode-escape')
        original_size_img = bytes(original_size_img_not_fixed, 'ascii').decode(
            'unicode-escape')
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"""query_{query.replace(' ', '_')}"""
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"""{path_name}/original_size_img_{index}.jpg""")
    return index
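# e.g. download_images_from_google_query("blue cars", 10) saves up to ten JPEGs
# under ./query_blue_cars/ and returns the number actually downloaded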
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 20
|
from manim import *
class Stage(Scene):
    def construct(self):
        # NOTE: layout directions (UP/RIGHT/DOWN) and the YELLOW/BLUE fills are
        # assumptions inferred from the key legend; the original constants did
        # not survive transcription.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)
        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text('Loaded Checkpoint', font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)
        blue_text = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=18, )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        step_a = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""", font_size=24, )
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))
        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 20
| 1
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class DebugLauncherTester(unittest.TestCase):
    def test_debug_launcher_script(self):
        debug_launcher(test_script.main)

    def test_debug_launcher_ops(self):
        debug_launcher(test_ops.main)
| 709
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = """sew-d"""

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256,
                 share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm",
                 hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
                 feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7,
                 feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu",
                 conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
                 conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
                 conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
                 conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
                 apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
                 mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
                 ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
                 classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
                F'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) '''
                F'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 76
| 0
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["""aatype"""].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["""aatype"""].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["""aatype"""].device, )
    protein_aatype = protein['''aatype'''].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["""aatype"""].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
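# After this call the feature dict carries both directions of the sparse/dense
# atom mapping: residx_atom14_to_atom37 (N_res, 14), residx_atom37_to_atom14
# (N_res, 37), plus the existence masks atom14_atom_exists and atom37_atom_exists.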
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["""aatype"""].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 532
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
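# `_import_structure` is consumed by the `_LazyModule` at the bottom of this
# file, so the torch-backed modules are only imported on first attribute access.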
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 206
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 709
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
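    # e.g. with size=8 a 20x21 image is padded symmetrically to 24x24:
    # (20 // 8 + 1) * 8 - 20 == 4 extra rows, (21 // 8 + 1) * 8 - 21 == 3 extra columns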
    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 323
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85
|
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''', FutureWarning, )
        super().__init__(args=args, **kwargs)
| 130
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 700
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""")}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""examples""": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""")})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""examples""": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
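# e.g. get_test_dummy_examples() == [(0, {"content": "foo"}), (1, {"content": "bar"}), (2, {"content": "foobar"})]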
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", f"""{builder.name}-train.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({"""content""": datasets.Value("""string""")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            self.assertDictEqual(dset["""train"""][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            with patch("""apache_beam.io.parquetio.WriteToParquet""") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, """default""", """0.0.0""", f"""{builder.name}-train-00000-of-00002.arrow""")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, """default""", """0.0.0""", f"""{builder.name}-train-00001-of-00002.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({"""content""": datasets.Value("""string""")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""]), sorted(["""foo""", """bar""", """foobar"""]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", f"""{builder.name}-train.arrow""")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            self.assertDictEqual(dset["""train"""][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json""")))
            del dset
| 258
| 0
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f'''Loading PyTorch weights from {pt_path}''')
        pt_state_dict = torch.load(pt_path, map_location='cpu')
        logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.''')
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
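# e.g. a PyTorch LayerNorm key ('encoder', 'layer_norm', 'weight') is renamed to
# the Flax convention ('encoder', 'layer_norm', 'scale'); 2D linear weights are
# transposed and 4D conv kernels are permuted from OIHW to HWIO on the way.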
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params['params']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['batch_stats'])
        random_flax_state_dict.update(flax_batch_stats)
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('.'))
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix)
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''')
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # the converted flax state dict is accumulated across all shards
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # each shard is a regular PyTorch checkpoint file
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
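# Usage sketch (hypothetical paths): load a serialized Flax checkpoint into a PyTorch
# model of the same architecture.
#
#     from transformers import BertForMaskedLM
#
#     pt_model = BertForMaskedLM.from_pretrained("bert-base-uncased")
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "/path/to/flax_model.msgpack")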
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
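# Example (derived from the name parsing above): for model_name = "xclip-base-patch32",
# start_idx = 11 and the two characters following "patch" give patch_size = 32.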
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
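# Example: rename_key("visual.conv1.weight") returns
# "vision_model.embeddings.patch_embedding.weight".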
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            # split the fused q/k/v projection into separate q_proj, k_proj and v_proj weights
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
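# Example invocation (sketch; the script filename is assumed):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32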
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
import os


def solution():
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
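# Worked example (from the Project Euler problem statement): when the list is sorted,
# COLIN is the 938th name and scores 3 + 15 + 12 + 9 + 14 = 53, so it contributes
# 938 * 53 = 49714 to the total.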
if __name__ == "__main__":
print(solution())
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    r"""Constructs a GLPN image processor, which resizes images down to the nearest multiple of `size_divisor`."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample: PILImageResampling,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
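    # Example: with size_divisor=32, an input of height 37 and width 110 is resized
    # down to 32 x 96 (37 // 32 * 32 = 32, 110 // 32 * 32 = 96).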
    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
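# Usage sketch (assumes PIL is available and an image file exists on disk):
#
#     from PIL import Image
#
#     processor = GLPNImageProcessor(size_divisor=32)
#     image = Image.open("example.jpg")
#     batch = processor(image, return_tensors="np")
#     print(batch["pixel_values"][0].shape)  # (3, H', W') with H', W' multiples of 32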
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="Bit does not output attentions" )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int ):
_SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE = layer_type
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
_SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs an M-CTC-T feature extractor that extracts Mel filterbank features from raw speech."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
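        # With the defaults (25 ms window, 10 ms hop at 16 kHz): sample_size = 400,
        # sample_stride = 160, n_fft = 512 (next power of two >= 400) and n_freqs = 257.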
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extracts MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,  # the mask is needed below for normalization
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
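
# Hedged usage sketch: the surrounding class is a speech feature extractor in the
# M-CTC-T style; the class name below is an assumption, not confirmed by this file.
#
#     import numpy as np
#     extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16000, padding_value=0.0)
#     waveform = np.random.randn(16000).astype(np.float32)  # one second of audio at 16 kHz
#     inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
#     # each entry of inputs["input_features"] has shape (num_frames, 80): one mel vector per frame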
| 666
|
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Returns the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Finds the greatest product of thirteen consecutive digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
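# Sanity check: str_eval multiplies the digits of its argument, e.g.
# str_eval("9989") == 9 * 9 * 8 * 9 == 5832; solution() slides a 13-digit
# window across N and keeps the largest such product.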
| 666
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 520
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls, model_id: Union[str, Path], use_auth_token=None, revision=None, force_download=False,
        cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token=None,
        cache_dir=None, **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir,
            force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
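
# Hedged usage sketch (the repo id is illustrative, not taken from this file):
#
#     model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
#     outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))  # keyword args become ONNX inputs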
| 520
| 1
|
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # all primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
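
# Worked example: for number = 29 the loop only tests i = 5 (since int(sqrt(29) + 1) == 6),
# and because 29 % 5 != 0 and 29 % 7 != 0, the function reports 29 as prime.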
class Test(unittest.TestCase):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 456
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class __a :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
class __a ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : List[Any])-> Tuple:
__lowerCAmelCase ={}
__lowerCAmelCase =[]
__lowerCAmelCase =1
__lowerCAmelCase =[1, 2]
__lowerCAmelCase ={"""a""": 1, """b""": 2}
__lowerCAmelCase ={"""a""": [1, 2], """b""": [3, 4]}
__lowerCAmelCase ={"""a""": {"""1""": 1}, """b""": 2}
__lowerCAmelCase ={"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__lowerCAmelCase ={}
__lowerCAmelCase =[]
__lowerCAmelCase =2
__lowerCAmelCase =[2, 3]
__lowerCAmelCase ={"""a""": 2, """b""": 3}
__lowerCAmelCase ={"""a""": [2, 3], """b""": [4, 5]}
__lowerCAmelCase ={"""a""": {"""1""": 2}, """b""": 3}
__lowerCAmelCase ={"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
__lowerCAmelCase =2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
__lowerCAmelCase ={"""a""": np.eye(2), """b""": np.zeros(3), """c""": np.ones(2)}
__lowerCAmelCase ={"""a""": 2, """b""": 0, """c""": 2}
__lowerCAmelCase ={
"""a""": np.eye(2).astype(snake_case_),
"""b""": np.zeros(3).astype(snake_case_),
"""c""": np.ones(2).astype(snake_case_),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_) , snake_case_)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_) , snake_case_)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, 1, num_proc=2)
def UpperCamelCase ( self : Any)-> int:
__lowerCAmelCase ={"""a""": 1, """b""": 2}
__lowerCAmelCase ={"""a""": 3, """b""": 4}
__lowerCAmelCase ={"""a""": 5, """b""": 6}
__lowerCAmelCase =sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))])
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_)) , snake_case_)
def UpperCamelCase ( self : Optional[Any])-> Optional[int]:
class __a :
SCREAMING_SNAKE_CASE = "bar"
__lowerCAmelCase =Foo()
self.assertEqual(foo.my_attr , """bar""")
with temporary_assignment(snake_case_ , """my_attr""" , """BAR"""):
self.assertEqual(foo.my_attr , """BAR""")
self.assertEqual(foo.my_attr , """bar""")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __lowerCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> Union[str, Any]:
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
__lowerCAmelCase ={f"""{i}""": i for i in range(__lowerCamelCase )}
__lowerCAmelCase =map_nested(lambda __lowerCamelCase : x + 10 , __lowerCamelCase , num_proc=__lowerCamelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("""input_data""" , [{}] )
def __lowerCAmelCase ( __lowerCamelCase : List[str] ) -> List[str]:
__lowerCAmelCase =NestedDataStructure(__lowerCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def __lowerCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : int ) -> List[str]:
__lowerCAmelCase =NestedDataStructure(__lowerCamelCase ).flatten()
assert output == expected_output
def __lowerCAmelCase ( ) -> int:
__lowerCAmelCase =A(x=1 , y="""foobar""" )
__lowerCAmelCase ={"""x""": 1, """y""": """foobar"""}
assert asdict(__lowerCamelCase ) == expected_output
__lowerCAmelCase ={"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
__lowerCAmelCase ={"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(__lowerCamelCase ) == expected_output
with pytest.raises(__lowerCamelCase ):
asdict([1, A(x=10 , y="""foo""" )] )
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 456
| 1
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__a :Tuple = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__a :List[Any] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__a :List[str] = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__a :List[str] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__a :str = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
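
# Worked example of the unbiased estimator: with n = 5 samples and c = 2 correct,
# pass@1 = 1 - C(3, 1) / C(5, 1) = 1 - 3/5 = 0.4, which matches
# 1 - (1 - 1/4) * (1 - 1/5) as computed by the product above.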
| 86
|
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
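    # the expected minimum spanning tree has total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37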
| 212
| 0
|
import numpy as np
class Cell:
    """A cell in the grid world: a position, a parent pointer, and the A* scores g, h, f."""

    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell) -> bool:
        return self.position == cell.position

    def showcell(self) -> None:
        print(self.position)
class Gridworld:
    """A `world_size` grid; `w` holds the grid cells themselves."""

    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self) -> None:
        print(self.w)

    def get_neighbours(self, cell):
        """Returns the neighbours of `cell` that lie inside the world limits."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search: returns the path from `start` to `goal` as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
a_ = Gridworld()
# Start position and goal
a_ = Cell()
a_ = (0, 0)
a_ = Cell()
a_ = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
a_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
a_ = 1
print(world.w)
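
# Note: g counts grid steps from the start while h is the *squared* Euclidean distance
# to the goal, so the search is strongly biased toward the diagonal from (0, 0) to (4, 4).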
| 193
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , a : int , a : Optional[int]=7 , a : List[Any]=3 , a : Any=30 , a : Dict=400 , a : str=True , a : List[Any]=None , a : List[str]=True , a : Optional[Any]=[0.5, 0.5, 0.5] , a : Optional[int]=[0.5, 0.5, 0.5] , a : Union[str, Any]=True , a : Tuple=1 / 255 , a : Optional[int]=True , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE : Optional[Any] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean
SCREAMING_SNAKE_CASE : Dict = image_std
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor
SCREAMING_SNAKE_CASE : Union[str, Any] = do_pad
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width when providing images to DetaImageProcessor."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =DetaImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DetaImageProcessingTester(self )
@property
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "do_rescale" ) )
self.assertTrue(hasattr(a , "do_pad" ) )
self.assertTrue(hasattr(a , "size" ) )
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
SCREAMING_SNAKE_CASE : str = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : List[str] = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[str] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE : int = DetaImageProcessor()
SCREAMING_SNAKE_CASE : Tuple = image_processing(images=a , annotations=a , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Any = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : str = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) )
# verify orig_size
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) )
# verify size
SCREAMING_SNAKE_CASE : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE : Any = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE : Optional[Any] = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE : str = image_processing(images=a , annotations=a , masks_path=a , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : str = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) )
# verify boxes
SCREAMING_SNAKE_CASE : int = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a )
SCREAMING_SNAKE_CASE : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : int = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) )
# verify masks
SCREAMING_SNAKE_CASE : Tuple = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , a )
# verify orig_size
SCREAMING_SNAKE_CASE : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) )
# verify size
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) )
| 193
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
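
# Hedged usage sketch (assumes the reconstructed class name above):
#
#     config = UperNetConfig()          # falls back to a ResNet backbone exposing stages 1-4
#     config_dict = config.to_dict()    # the nested backbone_config is serialized recursively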
| 137
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96,
        depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7,
        mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5,
        encoder_stride=32, **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 1
|
def solution(max_n: int = 1000000) -> int:
    """Returns the starting number below `max_n` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, max_n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
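
# Worked example: starting from 13 the chain is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# so counters[13] == 10; memoizing these chain lengths is what keeps solution(1000000) fast.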
if __name__ == "__main__":
print(solution(int(input().strip())))
| 75
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__A : Optional[Any] = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
__A : Tuple = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
__A : str = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes F1 over all answers, plus per-question macro-F1 and exact-match scores."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="""macro""" )
elif self.config_name == "record":
lowerCamelCase_ =[
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowerCamelCase_ ={pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg key is kept for backward compatibility with
        # configs saved under the old name.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dictionary."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
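if __name__ == "__main__":
    # Minimal usage sketch (values are illustrative only): a well-formed
    # `rope_scaling` dict passes validation, while an unknown scaling type
    # would raise the ValueError above.
    config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)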
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that BPE code chokes on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Copied from transformers.models.bart.tokenization_bart.get_pairs
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
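# For reference: bytes_to_unicode() maps e.g. the space byte to "Ġ"
# (bytes_to_unicode()[ord(" ")] == "Ġ"), and get_pairs(("h", "e", "l", "l", "o"))
# returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.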
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
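# Minimal usage sketch of the `global_attention_mask` padding above (the
# checkpoint name comes from the map at the top of this file; downloading it
# requires a network connection):
#
#     tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer("long document")
#     enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#     padded = tokenizer.pad(enc, padding="max_length", max_length=16)
#     # padded["global_attention_mask"] now ends with `-1` padding entries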
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Try to find a path from the top-left to the bottom-right corner of `maze`
    (0 = open cell, 1 = blocked cell) and print the solution grid if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search helper: returns True once cell (i, j) can be extended
    into a path that reaches the exit."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
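    # Quick demonstration on a small grid (0 = open cell, 1 = wall); prints the
    # visited-cell grid and returns True since a path exists.
    demo_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 1, 0],
    ]
    solve_maze(demo_maze)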
"""simple docstring"""
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` matches the Sri Lankan mobile-number format."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
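    # A few additional illustrative checks:
    assert is_sri_lankan_phone_number("+94767283848")
    assert is_sri_lankan_phone_number("0717283848")
    assert not is_sri_lankan_phone_number("0912343221")  # 9x is not a mobile prefix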
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = NllbTokenizer(A_ , keep_accents=A_ )
__lowerCAmelCase : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowerCAmelCase : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : Any = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(A_ , **A_ )
__lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
__lowerCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A_ )
__lowerCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__lowerCAmelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(A_ , A_ )
# Checks everything loads correctly in the same way
__lowerCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A_ )
__lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ , A_ ) )
shutil.rmtree(A_ )
# Save tokenizer rust, legacy_format=True
__lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
__lowerCAmelCase : str = tokenizer_r.save_pretrained(A_ , legacy_format=A_ )
__lowerCAmelCase : Any = tokenizer_p.save_pretrained(A_ )
# Checks it save with the same files
self.assertSequenceEqual(A_ , A_ )
# Checks everything loads correctly in the same way
__lowerCAmelCase : str = tokenizer_r.from_pretrained(A_ )
__lowerCAmelCase : str = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ , A_ ) )
shutil.rmtree(A_ )
# Save tokenizer rust, legacy_format=False
__lowerCAmelCase : Dict = tempfile.mkdtemp()
__lowerCAmelCase : Optional[int] = tokenizer_r.save_pretrained(A_ , legacy_format=A_ )
__lowerCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(A_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCAmelCase : Tuple = tokenizer_r.from_pretrained(A_ )
__lowerCAmelCase : str = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ , A_ ) )
shutil.rmtree(A_ )
@require_torch
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
if not self.test_seqaseq:
return
__lowerCAmelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
__lowerCAmelCase : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
__lowerCAmelCase : Optional[int] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
__lowerCAmelCase : str = tokenizer.prepare_seqaseq_batch(
src_texts=A_ , tgt_texts=A_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__lowerCAmelCase : str = tokenizer.prepare_seqaseq_batch(
A_ , tgt_texts=A_ , max_length=3 , return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__lowerCAmelCase : List[str] = tokenizer.prepare_seqaseq_batch(
src_texts=A_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('''decoder_input_ids''' , A_ )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
pass
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase : Any = [AddedToken('''<special>''' , lstrip=A_ )]
__lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A_ , additional_special_tokens=A_ , **A_ )
__lowerCAmelCase : Tuple = tokenizer_r.encode('''Hey this is a <special> token''' )
__lowerCAmelCase : List[str] = tokenizer_r.encode('''<special>''' , add_special_tokens=A_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(
A_ , additional_special_tokens=A_ , **A_ , )
__lowerCAmelCase : int = self.tokenizer_class.from_pretrained(
A_ , additional_special_tokens=A_ , **A_ )
__lowerCAmelCase : Optional[Any] = tokenizer_p.encode('''Hey this is a <special> token''' )
__lowerCAmelCase : str = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 25_6057 )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A_ )
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
self.assertIn(A_ , self.tokenizer.all_special_ids )
# fmt: off
__lowerCAmelCase : Optional[Any] = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__lowerCAmelCase : List[Any] = self.tokenizer.decode(A_ , skip_special_tokens=A_ )
__lowerCAmelCase : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A_ )
self.assertEqual(A_ , A_ )
self.assertNotIn(self.tokenizer.eos_token , A_ )
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , A_ )
__lowerCAmelCase : Optional[Any] = 10
__lowerCAmelCase : Optional[Any] = self.tokenizer(A_ , max_length=A_ , truncation=A_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , A_ )
self.assertEqual(len(A_ ) , A_ )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_6203, 3] )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : List[Any] = tempfile.mkdtemp()
__lowerCAmelCase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A_ )
__lowerCAmelCase : Optional[int] = NllbTokenizer.from_pretrained(A_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A_ )
@require_torch
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__lowerCAmelCase : Tuple = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(A_ , A_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__lowerCAmelCase : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A_ )
self.assertEqual(A_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : int = self.tokenizer(self.src_text , padding=A_ , truncation=A_ , max_length=3 , return_tensors='''pt''' )
__lowerCAmelCase : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=10 , return_tensors='''pt''' )
__lowerCAmelCase : Tuple = targets['''input_ids''']
__lowerCAmelCase : Any = shift_tokens_right(
A_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(A_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[25_6047, 70, 7356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_6057,
} , )
@require_torch
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Any = True
__lowerCAmelCase : List[str] = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__lowerCAmelCase : str = False
__lowerCAmelCase : int = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCAmelCase ( A__: Any , A__: Optional[Any] , A__: str , A__: Dict ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def _lowerCAmelCase ( A__: List[Any] , A__: Union[str, Any] , A__: List[Any] , A__: Union[str, Any] , A__: List[Any]=True ):
'''simple docstring'''
model.train()
UpperCAmelCase = model(A__ )
UpperCAmelCase = F.mse_loss(A__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(A__ )
def _lowerCAmelCase ( A__: Union[str, Any] , A__: Any=False ):
'''simple docstring'''
set_seed(42 )
UpperCAmelCase = RegressionModel()
UpperCAmelCase = deepcopy(A__ )
UpperCAmelCase = RegressionDataset(length=80 )
UpperCAmelCase = DataLoader(A__ , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase = LambdaLR(A__ , lr_lambda=lambda A__ : epoch**0.65 )
UpperCAmelCase = LambdaLR(A__ , lr_lambda=lambda A__ : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(A__ , A__ , A__ , A__ )
else:
UpperCAmelCase , UpperCAmelCase = accelerator.prepare(A__ , A__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCAmelCase ( A__: List[Any] ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_training_setup(A__ )
# Use a single batch
UpperCAmelCase , UpperCAmelCase = next(iter(A__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase , UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase , UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A__ , A__ , A__ , A__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A__ ):
step_model(A__ , A__ , A__ , A__ )
else:
# Sync grads
step_model(A__ , A__ , A__ , A__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(A__ , A__ , A__ , A__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(A__ ) )]
def _lowerCAmelCase ( A__: List[str] ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_training_setup(A__ )
# Use a single batch
UpperCAmelCase , UpperCAmelCase = next(iter(A__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase , UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase , UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A__ , A__ , A__ , A__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A__ ):
step_model(A__ , A__ , A__ , A__ )
else:
# Sync grads
step_model(A__ , A__ , A__ , A__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(A__ ) )]
def _lowerCAmelCase ( A__: Tuple=False , A__: Union[str, Any]=False ):
'''simple docstring'''
UpperCAmelCase = Accelerator(
split_batches=A__ , dispatch_batches=A__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = get_training_setup(A__ )
for iteration, batch in enumerate(A__ ):
UpperCAmelCase , UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase , UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase , UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A__ , A__ , A__ , A__ , A__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(A__ ):
step_model(A__ , A__ , A__ , A__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(A__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCAmelCase = ddp_input[torch.randperm(len(A__ ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 254
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
def snake_case_ ( self , _snake_case = "auto" ) -> List[str]:
"""simple docstring"""
if slice_size == "auto":
UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_snake_case )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(_snake_case )
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate: int = 16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio prompt with the speech model
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
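# --- Editor's note (hedged usage sketch, not part of the original file) ---
# Assembling this pipeline could look roughly like the following; the checkpoint
# names and the `custom_pipeline` id are illustrative assumptions, not pinned
# values from this file:
#
#     speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#     speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=speech_model,
#         speech_processor=speech_processor,
#     )
#     image = pipe(audio=waveform, sampling_rate=16_000).images[0]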
| 254
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
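# --- Editor's note (hedged sketch) ---
# With the `_LazyModule` pattern above, nothing under the heavy submodules is
# imported until an attribute is actually accessed, e.g.:
#
#     from transformers.models.gpt_neox import GPTNeoXConfig  # triggers the lazy import
#     config = GPTNeoXConfig()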
| 113
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors, steps):
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors):
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector, angle_in_degrees):
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors):
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
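    # Worked example: `rotate` multiplies by the standard 2D rotation matrix
    # [[cos t, -sin t], [sin t, cos t]], so rotating the unit x-vector by 60
    # degrees gives (cos 60, sin 60):
    #
    #     rotate(numpy.array([1, 0]), 60)  # approximately [0.5, 0.8660254]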
| 113
| 1
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
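# --- Editor's note (hedged sketch of the LBP idea) ---
# A local binary pattern compares each of the 8 neighbors against the center
# pixel and packs the 1/0 outcomes into one byte. For a center of 90 with
# neighbors [100, 80, 90, 95, 70, 120, 60, 91], thresholding (neighbor >= center)
# yields the bit string 10110101, i.e. the LBP value 181 for one common bit
# ordering; the exact ordering depends on how the implementation walks the ring.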
| 252
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` places when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
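    # Worked example: decimal_isolate(35.345, 1) computes round(35.345 - 35, 1),
    # i.e. round(0.345..., 1) == 0.3; with digit_amount == 0 the full decimal
    # part 0.345... is returned unrounded.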
| 546
| 0
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
    fail_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
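# --- Editor's note (hedged) ---
# Based only on the assertions above, `_convert_nargs_to_dict` turns an
# argv-style list into a typed dict, roughly:
#     {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#      "learning_rate": 5e-5, "max_steps": 50.5}
# while the `fail` variant (bare flags mixed with valued arguments) raises.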
| 501
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
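# --- Editor's note (hedged usage sketch) ---
# The dataset above pairs line-aligned `<type_path>.source` / `<type_path>.target`
# files; a typical way to consume it (argument values assumed) would be:
#
#     train_dataset = Seq2SeqDataset(tokenizer, data_dir, max_source_length=128,
#                                    max_target_length=25, type_path="train")
#     loader = torch.utils.data.DataLoader(
#         train_dataset, batch_size=8, collate_fn=train_dataset.collate_fn
#     )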
| 501
| 1
|
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase of `number_of_steps`, taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
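# Worked example: the recurrence is the Fibonacci one, so
# climb_stairs(1) == 1, climb_stairs(2) == 2, climb_stairs(3) == 3,
# climb_stairs(4) == 5 (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2).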
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 686
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
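# --- Editor's note (hedged usage sketch) ---
# Instantiating the hybrid variant builds a BiT backbone config automatically:
#
#     config = DPTConfig(is_hybrid=True)   # backbone_config becomes a BitConfig
#     plain = DPTConfig()                  # pure-ViT variant, no backbone
#     plain.to_dict()["model_type"]        # "dpt"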
| 718
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684
| 0
|
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers base**power whose decimal length equals `power` (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
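# Worked check: 9**21 = 109418989131512359209 has 21 digits, so it counts;
# bases stop below 10 because 10**n always has n + 1 digits, and powers above
# 21 cannot work for any single-digit base since 9**22 has only 21 digits.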
| 626
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCAmelCase__ = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(lowerCAmelCase__)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
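# --- Editor's note (hedged usage sketch) ---
# A RagConfig is typically composed from its two sub-configs, e.g.:
#
#     from transformers import AutoConfig
#     qe_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     gen_config = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(qe_config, gen_config)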
| 626
| 1
|
from collections import deque
def tarjan(g):
    """Tarjan's algorithm for finding the strongly connected components of a directed graph."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
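    # Worked reading of the test above: 0->1->2->0 and 0->3->1 close a cycle, so
    # {0, 1, 2, 3} collapses into one strongly connected component, while 4, 5
    # and 6 have no path back to their predecessors and form singleton components.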
| 33
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
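# --- Editor's note (hedged worked example) ---
# The BPE loop greedily applies the highest-ranked merge. With merges
# ("l", "o") then ("lo", "w</w>"), tokenizing "low" proceeds as
# ("l", "o", "w</w>") -> ("lo", "w</w>") -> ("low</w>"); the "@@ " join plus
# the trailing [:-4] strip then turn it back into the surface form "low".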
| 33
| 1
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = _re_backend.findall(_UpperCamelCase )
if len(_UpperCamelCase ) == 0:
return None
return "_and_".join(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
with open(os.path.join(_UpperCamelCase , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase_ : Any = f.readlines()
# Get to the point where we do the actual imports for type checking
lowercase_ : Optional[int] = 0
lowercase_ : Union[str, Any] = {}
# Go through to the end of the file
while line_index < len(_UpperCamelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowercase_ : str = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
lowercase_ : int = []
# Until we unindent, add backend objects to the list
while line_index < len(_UpperCamelCase ) and len(lines[line_index] ) > 1:
lowercase_ : List[str] = lines[line_index]
lowercase_ : Tuple = _re_single_line_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_UpperCamelCase ) > 0:
lowercase_ : List[Any] = objects
else:
line_index += 1
return backend_specific_objects
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(_UpperCamelCase )
elif name.islower():
return DUMMY_FUNCTION.format(_UpperCamelCase , _UpperCamelCase )
else:
return DUMMY_CLASS.format(_UpperCamelCase , _UpperCamelCase )
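# Illustrative outputs (assumptions, but they mirror the unit tests further
# below in this document):
#   create_dummy_object("CONSTANT", "'torch'")  -> "\nCONSTANT = None\n"
#   create_dummy_object("function", "'torch'")  -> a stub def calling requires_backends
#   create_dummy_object("FakeClass", "'torch'") -> a DummyObject-metaclass stub class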
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=None ):
"""simple docstring"""
if backend_specific_objects is None:
lowercase_ : Optional[int] = read_init()
# Special-case mapping from backend name to the module name as used in the function requires_{modulename}
lowercase_ : List[Any] = {}
for backend, objects in backend_specific_objects.items():
lowercase_ : List[str] = "[" + ", ".join(F"""\"{b}\"""" for b in backend.split("_and_" ) ) + "]"
lowercase_ : Union[str, Any] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_UpperCamelCase , _UpperCamelCase ) for o in objects] )
lowercase_ : Dict = dummy_file
return dummy_files
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=False ):
"""simple docstring"""
lowercase_ : int = create_dummy_files()
# Special-case mapping from backend name to the shortcut used in utils/dummy_xxx_objects.py
lowercase_ : List[str] = {"torch": "pt"}
# Locate actual dummy modules and read their content.
lowercase_ : str = os.path.join(_UpperCamelCase , "utils" )
lowercase_ : Optional[int] = {
backend: os.path.join(_UpperCamelCase , F"""dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py""" )
for backend in dummy_files.keys()
}
lowercase_ : List[Any] = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_UpperCamelCase ):
with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase_ : str = f.read()
else:
lowercase_ : Union[str, Any] = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"""Updating diffusers.utils.dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py as the main """
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"""diffusers.utils.dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py. Run `make fix-copies` """
"to fix this." )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
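# Illustrative invocations (assuming, per the comment at the top of this
# script, that it is run from the repository root):
#   python utils/check_dummies.py                      # check only; raises on mismatch
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files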
| 620
|
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[Any] = WavaVecaForSequenceClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
lowercase_ : Optional[int] = downstream_dict["projector.weight"]
lowercase_ : str = downstream_dict["projector.bias"]
lowercase_ : int = downstream_dict["model.post_net.linear.weight"]
lowercase_ : Optional[Any] = downstream_dict["model.post_net.linear.bias"]
return model
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = WavaVecaForAudioFrameClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
lowercase_ : Any = downstream_dict["model.linear.weight"]
lowercase_ : List[str] = downstream_dict["model.linear.bias"]
return model
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = WavaVecaForXVector.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
lowercase_ : str = downstream_dict["connector.weight"]
lowercase_ : List[str] = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowercase_ : Union[str, Any] = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
lowercase_ : Dict = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
lowercase_ : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
lowercase_ : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
lowercase_ : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
lowercase_ : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
lowercase_ : Optional[Any] = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = torch.load(_UpperCamelCase , map_location="cpu" )
lowercase_ : Dict = checkpoint["Downstream"]
lowercase_ : Optional[Any] = WavaVecaConfig.from_pretrained(_UpperCamelCase )
lowercase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(
_UpperCamelCase , return_attention_mask=_UpperCamelCase , do_normalize=_UpperCamelCase )
lowercase_ : Dict = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
lowercase_ : Any = convert_classification(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith("ForAudioFrameClassification" ):
lowercase_ : Optional[int] = convert_diarization(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith("ForXVector" ):
lowercase_ : List[Any] = convert_xvector(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
lowercase_ : List[str] = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_UpperCamelCase )
hf_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
UpperCamelCase__ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 620
| 1
|
import os
import sys
import unittest
a_ : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : str = os.path.join(git_repo_path, 'src', 'transformers')
a_ : Optional[int] = '\n{0} = None\n'
a_ : Any = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
a_ : Dict = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(UpperCamelCase__ )
__magic_name__ = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(UpperCamelCase__ , '''tokenizers''' )
__magic_name__ = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(UpperCamelCase__ , '''tensorflow_text''' )
__magic_name__ = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers''' )
__magic_name__ = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tensorflow_text''' )
__magic_name__ = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , UpperCamelCase__ )
self.assertIn('''tensorflow_text''' , UpperCamelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , UpperCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , '''\nCONSTANT = None\n''' )
__magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
UpperCamelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__magic_name__ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , UpperCamelCase__ )
| 712
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
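# Assumed file format for objects.txt / attributes.txt: one comma-separated
# record per line with the label name in the first field, so a hypothetical
# line "tree,trees" would yield the class "tree".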
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )  # torch.tensor is a factory function, not a class
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if not x] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
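# Hypothetical examples of the two URL layouts built above:
#   legacy ("/" not in model_id): https://cdn.huggingface.co/bert-base-uncased-config.yaml
#   nested (user/model ids):      https://cdn.huggingface.co/some-user/some-model/config.yaml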
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None means we don't have a connection, the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' % (snake_case_ , temp_file.name) )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
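# Cache filenames are a hex digest of the URL, with "." plus a digest of the
# ETag appended when one is known; the ".h5" suffix is preserved so that
# TensorFlow-format weight files stay recognizable on disk.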
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__magic_name__ = eval(f.read() )
else:
__magic_name__ = requests.get(snake_case_ )
try:
__magic_name__ = req.json()  # the response object, not the requests module
except Exception:
__magic_name__ = req.content.decode()
assert data is not None, "could not connect"
try:
__magic_name__ = eval(snake_case_ )
except Exception:
__magic_name__ = data.split('''\n''' )
req.close()
return data
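# Caution (descriptive note): both branches above may ultimately eval() the
# raw file or response contents, so this helper should only be pointed at
# trusted inputs.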
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
| 678
| 0
|
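# Descriptive header (assumption): this is a Project Euler-style "sum of
# digits sequence" solver, where a(1) = 1 and a(n+1) = a(n) + digit_sum(a(n)),
# and solution() returns a(10**15). `memo` caches "jumps" -- precomputed
# differences over runs of terms during which the upper digits of a(n) stay
# fixed -- so the sequence never has to be stepped term by term up to 10**15.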
UpperCAmelCase__ : Optional[int] = range(2, 20 + 1)
UpperCAmelCase__ : int = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase__ : dict[int, dict[int, list[list[int]]]] = {}
def A ( snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : str ) -> Any:
'''simple docstring'''
__snake_case = sum(a_i[j] for j in range(snake_case__ , len(snake_case__ ) ) )
__snake_case = sum(a_i[j] * base[j] for j in range(min(len(snake_case__ ) , snake_case__ ) ) )
__snake_case , __snake_case = 0, 0
__snake_case = n - i
__snake_case = memo.get(snake_case__ )
if sub_memo is not None:
__snake_case = sub_memo.get(snake_case__ )
if jumps is not None and len(snake_case__ ) > 0:
# find and make the largest jump without going over
__snake_case = -1
for _k in range(len(snake_case__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__snake_case = _k
break
if max_jump >= 0:
__snake_case , __snake_case , __snake_case = jumps[max_jump]
# since the difference between jumps is cached, add c
__snake_case = diff + c
for j in range(min(snake_case__ , len(snake_case__ ) ) ):
__snake_case , __snake_case = divmod(snake_case__ , 10 )
if new_c > 0:
add(snake_case__ , snake_case__ , snake_case__ )
else:
__snake_case = []
else:
__snake_case = {c: []}
__snake_case = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__snake_case , __snake_case = next_term(snake_case__ , k - 1 , i + dn , snake_case__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__snake_case , __snake_case = compute(snake_case__ , snake_case__ , i + dn , snake_case__ )
diff += _diff
dn += terms_jumped
__snake_case = sub_memo[c]
# keep jumps sorted by # of terms skipped
__snake_case = 0
while j < len(snake_case__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(snake_case__ , (diff, dn, k) )
return (diff, dn)
def A ( snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ) -> List[str]:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(snake_case__ ):
a_i.extend([0 for _ in range(k - len(snake_case__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__snake_case = i
__snake_case , __snake_case , __snake_case = 0, 0, 0
for j in range(len(snake_case__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__snake_case = ds_c + ds_b
diff += addend
__snake_case = 0
for j in range(snake_case__ ):
__snake_case = a_i[j] + addend
__snake_case , __snake_case = divmod(snake_case__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(snake_case__ , snake_case__ , snake_case__ )
return diff, i - start_i
def A ( snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Any ) -> Dict:
'''simple docstring'''
for j in range(snake_case__ , len(snake_case__ ) ):
__snake_case = digits[j] + addend
if s >= 10:
__snake_case , __snake_case = divmod(snake_case__ , 10 )
__snake_case = addend // 10 + quotient
else:
__snake_case = s
__snake_case = addend // 10
if addend == 0:
break
while addend > 0:
__snake_case , __snake_case = divmod(snake_case__ , 10 )
digits.append(snake_case__ )
def A ( snake_case__ : int = 10**15 ) -> int:
'''simple docstring'''
__snake_case = [1]
__snake_case = 1
__snake_case = 0
while True:
__snake_case , __snake_case = next_term(snake_case__ , 20 , i + dn , snake_case__ )
dn += terms_jumped
if dn == n - i:
break
__snake_case = 0
for j in range(len(snake_case__ ) ):
a_n += digits[j] * 10**j
return a_n
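# Sanity check, easy to verify by hand: the first terms of the sequence are
# 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...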
if __name__ == "__main__":
print(F"""{solution() = }""")
| 313
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[Any]:
super().__init__(
lowercase_ , question_encoder_tokenizer=lowercase_ , generator_tokenizer=lowercase_ , index=lowercase_ , init_retrieval=lowercase_ , )
__snake_case = None
def _a ( self , lowercase_) -> Union[str, Any]:
logger.info('initializing retrieval')
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized')
# needs to be set manually
__snake_case = self._infer_socket_ifname()
# avoid clash with the NCCL port
__snake_case = str(distributed_port + 1)
__snake_case = dist.new_group(ranks=lowercase_ , backend='gloo')
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main')
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
def _a ( self) -> int:
return dist.get_rank(group=self.process_group) == 0
def _a ( self , lowercase_ , lowercase_ , lowercase_=torch.floataa) -> Dict:
__snake_case = torch.empty(lowercase_ , dtype=lowercase_)
dist.scatter(lowercase_ , src=0 , scatter_list=lowercase_ , group=self.process_group)
return target_tensor
def _a ( self) -> str:
__snake_case = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__snake_case = next((addr for addr in addrs if addr.startswith('e')) , lowercase_)
return ifname
def _a ( self , lowercase_ , lowercase_) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
__snake_case , __snake_case = self._main_retrieve(lowercase_ , lowercase_)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase_)
# distributed training
__snake_case = dist.get_world_size(group=self.process_group)
# gather logic
__snake_case = None
if self._is_main():
__snake_case = [torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(lowercase_)]
dist.gather(torch.tensor(lowercase_) , dst=0 , gather_list=lowercase_ , group=self.process_group)
# scatter logic
__snake_case = question_hidden_states.shape[0]
__snake_case = []
__snake_case = []
if self._is_main():
assert len(lowercase_) == world_size
__snake_case , __snake_case = self._main_retrieve(torch.cat(lowercase_).numpy() , lowercase_)
__snake_case , __snake_case = torch.tensor(lowercase_), torch.tensor(lowercase_)
__snake_case = self._chunk_tensor(lowercase_ , lowercase_)
__snake_case = self._chunk_tensor(lowercase_ , lowercase_)
__snake_case = self._scattered(lowercase_ , [n_queries, n_docs] , target_type=torch.intaa)
__snake_case = self._scattered(lowercase_ , [n_queries, n_docs, question_hidden_states.shape[1]])
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowercase_)
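# Flow of the distributed retrieve above: every worker gathers its question
# hidden states to rank 0, rank 0 runs the actual index lookup on the
# concatenated batch, and the per-worker slices of doc ids and embeddings are
# then scattered back through _scattered().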
| 313
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: List[Any] , __UpperCAmelCase: Any=None , __UpperCAmelCase: List[Any]=None , __UpperCAmelCase: int=None , __UpperCAmelCase: Optional[Any]=None , __UpperCAmelCase: Tuple=None , __UpperCAmelCase: Tuple=None , ) -> int:
if attention_mask is None:
UpperCamelCase__ : Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCamelCase__ : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCamelCase__ : Union[str, Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCamelCase__ : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCamelCase__ : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
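# Descriptive note: the head_mask / decoder_head_mask / cross_attn_head_mask
# arrays built above are not included in the returned dict in this excerpt,
# and "decoder_attention_mask" is mapped to the encoder attention_mask.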
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=7, __magic_name__=True, __magic_name__=False, __magic_name__=99, __magic_name__=16, __magic_name__=2, __magic_name__=4, __magic_name__=4, __magic_name__="gelu", __magic_name__=0.1, __magic_name__=0.1, __magic_name__=32, __magic_name__=2, __magic_name__=1, __magic_name__=0, __magic_name__=0.02, ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : int = seq_length
UpperCamelCase__ : int = is_training
UpperCamelCase__ : Optional[int] = use_labels
UpperCamelCase__ : List[Any] = vocab_size
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : int = num_hidden_layers
UpperCamelCase__ : Dict = num_attention_heads
UpperCamelCase__ : Union[str, Any] = intermediate_size
UpperCamelCase__ : Optional[Any] = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : List[str] = attention_probs_dropout_prob
UpperCamelCase__ : Optional[Any] = max_position_embeddings
UpperCamelCase__ : Union[str, Any] = eos_token_id
UpperCamelCase__ : Optional[Any] = pad_token_id
UpperCamelCase__ : Tuple = bos_token_id
UpperCamelCase__ : Optional[int] = initializer_range
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Dict = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
UpperCamelCase__ : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.intaa )), -1 )
UpperCamelCase__ : Any = shift_tokens_right(_lowerCamelCase, 1, 2 )
UpperCamelCase__ : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=_lowerCamelCase, )
UpperCamelCase__ : List[Any] = prepare_blenderbot_inputs_dict(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
return config, inputs_dict
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : str = 20
UpperCamelCase__ : Union[str, Any] = model_class_name(_lowerCamelCase )
UpperCamelCase__ : List[str] = model.encode(inputs_dict['''input_ids'''] )
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCamelCase__ : List[str] = model.init_cache(decoder_input_ids.shape[0], _lowerCamelCase, _lowerCamelCase )
UpperCamelCase__ : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='''i4''' )
UpperCamelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
UpperCamelCase__ : Any = model.decode(
decoder_input_ids[:, :-1], _lowerCamelCase, decoder_attention_mask=_lowerCamelCase, past_key_values=_lowerCamelCase, decoder_position_ids=_lowerCamelCase, )
UpperCamelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
UpperCamelCase__ : List[str] = model.decode(
decoder_input_ids[:, -1:], _lowerCamelCase, decoder_attention_mask=_lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_position_ids=_lowerCamelCase, )
UpperCamelCase__ : Dict = model.decode(_lowerCamelCase, _lowerCamelCase )
UpperCamelCase__ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}" )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Dict = 20
UpperCamelCase__ : List[Any] = model_class_name(_lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] )
UpperCamelCase__ ,UpperCamelCase__ : int = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCamelCase__ : int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
], axis=-1, )
UpperCamelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0], _lowerCamelCase, _lowerCamelCase )
UpperCamelCase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
UpperCamelCase__ : Optional[int] = model.decode(
decoder_input_ids[:, :-1], _lowerCamelCase, decoder_attention_mask=_lowerCamelCase, past_key_values=_lowerCamelCase, decoder_position_ids=_lowerCamelCase, )
UpperCamelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
UpperCamelCase__ : Tuple = model.decode(
decoder_input_ids[:, -1:], _lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=_lowerCamelCase, decoder_position_ids=_lowerCamelCase, )
UpperCamelCase__ : List[Any] = model.decode(_lowerCamelCase, _lowerCamelCase, decoder_attention_mask=_lowerCamelCase )
UpperCamelCase__ : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}" )
@require_flax
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
a : str = 99
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
], dtype=np.intaa, )
UpperCamelCase__ : str = input_ids.shape[0]
UpperCamelCase__ : int = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : str = self._get_config_and_data()
UpperCamelCase__ : int = FlaxBlenderbotForConditionalGeneration(_lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = lm_model(input_ids=_lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape, _lowerCamelCase )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
UpperCamelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_lowerCamelCase )
UpperCamelCase__ : Any = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.intaa )
UpperCamelCase__ : Optional[int] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.intaa )
UpperCamelCase__ : Optional[int] = lm_model(input_ids=_lowerCamelCase, decoder_input_ids=_lowerCamelCase )
UpperCamelCase__ : int = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape, _lowerCamelCase )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : int = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.intaa )
UpperCamelCase__ : List[str] = shift_tokens_right(_lowerCamelCase, 1, 2 )
UpperCamelCase__ : Any = np.equal(_lowerCamelCase, 1 ).astype(np.floataa ).sum()
UpperCamelCase__ : Union[str, Any] = np.equal(_lowerCamelCase, 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape, input_ids.shape )
self.assertEqual(_lowerCamelCase, n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class lowercase__ ( __UpperCAmelCase , unittest.TestCase , __UpperCAmelCase ):
'''simple docstring'''
a : Tuple = True
a : Optional[int] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
a : str = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : str = FlaxBlenderbotModelTester(self )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ : int = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase )
UpperCamelCase__ : Any = model_class(_lowerCamelCase )
@jax.jit
def encode_jitted(__magic_name__, __magic_name__=None, **__magic_name__ ):
return model.encode(input_ids=_lowerCamelCase, attention_mask=_lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
UpperCamelCase__ : Optional[int] = encode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCamelCase__ : str = encode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ), len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase, _lowerCamelCase ):
self.assertEqual(jitted_output.shape, output.shape )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ : Dict = model_class(_lowerCamelCase )
UpperCamelCase__ : Optional[Any] = model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''] )
UpperCamelCase__ : Optional[int] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__magic_name__, __magic_name__, __magic_name__ ):
return model.decode(
decoder_input_ids=_lowerCamelCase, decoder_attention_mask=_lowerCamelCase, encoder_outputs=_lowerCamelCase, )
with self.subTest('''JIT Enabled''' ):
UpperCamelCase__ : str = decode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCamelCase__ : int = decode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ), len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase, _lowerCamelCase ):
self.assertEqual(jitted_output.shape, output.shape )
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCamelCase__ : Any = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCamelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
UpperCamelCase__ : Union[str, Any] = model(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skipUnless(jax_device != '''cpu''', '''3B test too slow on CPU.''' )
@slow
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCamelCase__ : str = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCamelCase__ : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''', from_pt=_lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCamelCase__ : List[str] = ['''Sam''']
UpperCamelCase__ : Optional[Any] = tokenizer(_lowerCamelCase, return_tensors='''jax''' )
UpperCamelCase__ : str = model.generate(**_lowerCamelCase, **_lowerCamelCase )
UpperCamelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCamelCase__ : str = tokenizer.batch_decode(_lowerCamelCase, **_lowerCamelCase )
assert generated_txt[0].strip() == tgt_text
| 701
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : Optional[int] = "marian"
a : Optional[Any] = ["past_key_values"]
a : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self, __magic_name__=58101, __magic_name__=None, __magic_name__=1024, __magic_name__=12, __magic_name__=4096, __magic_name__=16, __magic_name__=12, __magic_name__=4096, __magic_name__=16, __magic_name__=0.0, __magic_name__=0.0, __magic_name__=True, __magic_name__=True, __magic_name__="gelu", __magic_name__=1024, __magic_name__=0.1, __magic_name__=0.0, __magic_name__=0.0, __magic_name__=0.02, __magic_name__=58100, __magic_name__=False, __magic_name__=58100, __magic_name__=0, __magic_name__=0, __magic_name__=True, **__magic_name__, ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : Dict = decoder_vocab_size or vocab_size
UpperCamelCase__ : Any = max_position_embeddings
UpperCamelCase__ : Any = d_model
UpperCamelCase__ : Union[str, Any] = encoder_ffn_dim
UpperCamelCase__ : Optional[Any] = encoder_layers
UpperCamelCase__ : List[str] = encoder_attention_heads
UpperCamelCase__ : str = decoder_ffn_dim
UpperCamelCase__ : str = decoder_layers
UpperCamelCase__ : Optional[int] = decoder_attention_heads
UpperCamelCase__ : str = dropout
UpperCamelCase__ : Optional[Any] = attention_dropout
UpperCamelCase__ : List[Any] = activation_dropout
UpperCamelCase__ : int = activation_function
UpperCamelCase__ : Optional[int] = init_std
UpperCamelCase__ : Any = encoder_layerdrop
UpperCamelCase__ : Tuple = decoder_layerdrop
UpperCamelCase__ : Dict = use_cache
UpperCamelCase__ : str = encoder_layers
UpperCamelCase__ : str = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase__ : Union[str, Any] = share_encoder_decoder_embeddings
super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class Seq2SeqOnnxConfig(OnnxSeq2SeqConfigWithPast):  # placeholder class name: the model-specific name was mangled in this dump
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
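# Hypothetical export sketch (added; not part of this file) showing how an
# OnnxSeq2SeqConfigWithPast subclass like the one above is typically consumed.
# The `model` and `tokenizer` objects are placeholders:
#
# from pathlib import Path
# from transformers.onnx import export
#
# onnx_config = Seq2SeqOnnxConfig(model.config, task="seq2seq-lm")
# onnx_inputs, onnx_outputs = export(
#     tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx")
# )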
| 369
| 0
|
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """
    Project Euler 135: count how many n < limit have exactly ten solutions of
    x**2 - y**2 - z**2 == n with x > y > z positive integers in arithmetic
    progression.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 434
|
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = (DPMSolverSDEScheduler,)
SCREAMING_SNAKE_CASE = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
def _UpperCamelCase ( self ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A )
def _UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A ,beta_end=A )
def _UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A )
def _UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps ,device=A )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
        UpperCAmelCase = scheduler_class(**A ,use_karras_sigmas=True )
scheduler.set_timesteps(self.num_inference_steps ,device=A )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
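# For orientation (my sketch, not part of the test file): the loop pattern
# exercised above mirrors real inference with DPMSolverSDEScheduler:
#
# scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100)
# scheduler.set_timesteps(10)
# for t in scheduler.timesteps:
#     scaled = scheduler.scale_model_input(sample, t)
#     noise_pred = model(scaled, t)
#     sample = scheduler.step(noise_pred, t, sample).prev_sample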
| 341
| 0
|
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements of `array`, via a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
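# Quick self-check added for clarity: the window update makes this O(n) overall
# instead of the naive O(n*k); a tiny hand-verified case:
assert max_sum_in_array([1, 2, 3, 4], 2) == 7  # best window is [3, 4]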
| 291
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( lowercase_ , unittest.TestCase):
'''simple docstring'''
lowerCamelCase : Optional[int] = GPTSanJapaneseTokenizer
lowerCamelCase : Any = False
lowerCamelCase : Optional[Any] = {"do_clean_text": False, "add_prefix_space": False}
def __lowercase ( self ) -> Any:
'''simple docstring'''
super().setUp()
# fmt: off
__snake_case :str = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__snake_case :Optional[Any] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__snake_case :Optional[int] = {"""unk_token""": """<unk>"""}
__snake_case :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(a__ ) )
def __lowercase ( self , **a__ ) -> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __lowercase ( self , a__ ) -> List[str]:
'''simple docstring'''
__snake_case :Tuple = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__snake_case :Union[str, Any] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowercase ( self , a__ ) -> List[str]:
'''simple docstring'''
__snake_case , __snake_case :Dict = self.get_input_output_texts(a__ )
__snake_case :Dict = tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case :int = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
return text, ids
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass # TODO add if relevant
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :Optional[int] = self.get_tokenizer()
# Testing tokenization
__snake_case :int = """こんにちは、世界。 こんばんは、㔺界。"""
__snake_case :Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__snake_case :Optional[Any] = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
__snake_case :List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__snake_case :Any = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
__snake_case :Tuple = tokens + [tokenizer.unk_token]
__snake_case :Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__snake_case :List[Any] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(a__ , a__ )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Tuple = self.get_tokenizer()
# Testing tokenization
__snake_case :Optional[Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__snake_case :Tuple = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__snake_case :str = tokenizer.encode(a__ )
__snake_case :Optional[int] = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
@slow
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :List[str] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__snake_case :str = """こんにちは、世界。"""
__snake_case :List[str] = """こんばんは、㔺界。😀"""
__snake_case :Dict = """こんにちは、世界。こんばんは、世界。😀"""
__snake_case :int = tokenizer.encode(prefix_text + input_text )
__snake_case :Optional[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__snake_case :Any = tokenizer.encode(a__ , prefix_text=a__ )
__snake_case :Optional[Any] = tokenizer.decode(a__ )
__snake_case :Optional[Any] = tokenizer.decode(a__ )
__snake_case :Optional[Any] = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , a__ )
@slow
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__snake_case :List[Any] = """こんにちは、世界。"""
__snake_case :Dict = """こんばんは、㔺界。😀"""
__snake_case :Optional[int] = len(tokenizer.encode(a__ ) ) - 2
__snake_case :Union[str, Any] = len(tokenizer.encode(a__ ) ) - 2
__snake_case :Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1)
__snake_case :Optional[Any] = [1] * (len_prefix + len_text + 1) + [0]
__snake_case :Union[str, Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__snake_case :int = tokenizer(prefix_text + input_text ).token_type_ids
__snake_case :List[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__snake_case :int = tokenizer(a__ , prefix_text=a__ ).token_type_ids
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
@slow
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__snake_case :Tuple = tokenizer.encode("""あンいワ""" )
__snake_case :List[str] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__snake_case :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(a__ ) , tokenizer.decode(a__ ) )
self.assertEqual(tokenizer.decode(a__ ) , tokenizer.decode(a__ ) )
self.assertNotEqual(a__ , a__ )
self.assertNotEqual(a__ , a__ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case :List[str] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__snake_case :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__snake_case :Optional[int] = tokenizer(a__ , padding=a__ )
__snake_case :int = tokenizer.batch_encode_plus(a__ , padding=a__ )
# fmt: off
__snake_case :Tuple = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
__snake_case :Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__snake_case :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , a__ )
self.assertListEqual(x_token.token_type_ids , a__ )
self.assertListEqual(x_token.attention_mask , a__ )
self.assertListEqual(x_token_a.input_ids , a__ )
self.assertListEqual(x_token_a.token_type_ids , a__ )
self.assertListEqual(x_token_a.attention_mask , a__ )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
pass
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 291
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
_A : Union[str, Any] = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
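# Hypothetical usage sketch (mine): PipelineTool instances are callable, so the
# encode -> forward -> decode chain above runs end to end like this (requires
# downloading the checkpoint, hence left commented):
#
# translator = TranslationTool()
# print(translator("How are you?", src_lang="English", tgt_lang="French"))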
| 100
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation used to normalize (scale) and
    de-normalize (unscale) image embeddings.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
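# Round-trip sketch (mine; relies on the restored scale/unscale names above).
# With mean/std at their zero/one initialization, unscale exactly inverts scale:
#
# normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
# x = torch.randn(2, 768)
# assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)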
| 425
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
A_ = ''''''
A_ = ''''''
A_ = ''''''
A_ = 1 # (0 is vertical, 1 is horizontal)
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = get_dataset(snake_case__ , snake_case__ )
print("""Processing...""" )
_snake_case : List[Any] = update_image_and_anno(snake_case__ , snake_case__ , snake_case__ )
for index, image in enumerate(snake_case__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_snake_case : Optional[Any] = random_chars(32 )
_snake_case : Tuple = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_snake_case : Dict = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(F"/{file_root}.jpg" , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(snake_case__ )} with {file_name}" )
_snake_case : Optional[int] = []
for anno in new_annos[index]:
_snake_case : str = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(snake_case__ )
with open(F"/{file_root}.txt" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
_snake_case : List[str] = []
for label_file in glob.glob(os.path.join(snake_case__ , """*.txt""" ) ):
_snake_case : int = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case__ ) as in_file:
_snake_case : Union[str, Any] = in_file.readlines()
_snake_case : int = os.path.join(snake_case__ , F"{label_name}.jpg" )
_snake_case : str = []
for obj_list in obj_lists:
_snake_case : Dict = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(snake_case__ )
labels.append(snake_case__ )
return img_paths, labels
def UpperCAmelCase__ (snake_case__ : list , snake_case__ : list , snake_case__ : int = 1 ):
"""simple docstring"""
_snake_case : Dict = []
_snake_case : Any = []
_snake_case : List[Any] = []
for idx in range(len(snake_case__ ) ):
_snake_case : Optional[int] = []
_snake_case : Any = img_list[idx]
path_list.append(snake_case__ )
_snake_case : Tuple = anno_list[idx]
_snake_case : Any = cva.imread(snake_case__ )
if flip_type == 1:
_snake_case : Optional[int] = cva.flip(snake_case__ , snake_case__ )
for bbox in img_annos:
_snake_case : List[str] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_snake_case : List[Any] = cva.flip(snake_case__ , snake_case__ )
for bbox in img_annos:
_snake_case : int = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(snake_case__ )
new_imgs_list.append(snake_case__ )
return new_imgs_list, new_annos_lists, path_list
def UpperCAmelCase__ (snake_case__ : int = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
_snake_case : Dict = ascii_lowercase + digits
return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
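# Note on the bbox math above (added for clarity): annotations are assumed to
# be YOLO format [class, x_center, y_center, width, height] with normalized
# coordinates, so a horizontal flip maps x_center -> 1 - x_center, e.g.
# [0, 0.3, 0.5, 0.2, 0.2] -> [0, 0.7, 0.5, 0.2, 0.2].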
| 720
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
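# A minimal (hypothetical) subclass sketch illustrating the contract; in the
# real CLI, `parser` is the subparsers action returned by add_subparsers():
#
# class HelloCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         hello_parser = parser.add_parser("hello")
#         hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#     def run(self):
#         print("hello from the CLI")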
| 28
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
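# Illustration (mine): with the _LazyModule registration above, importing the
# package is cheap; the heavy submodule import only happens on first attribute
# access, e.g. `from transformers.models.herbert import HerbertTokenizer`
# triggers the real tokenization_herbert import at that moment.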
| 0
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]:
__UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )]
if identifier is not None:
__UpperCAmelCase =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for n_ in n_identifier:
__UpperCAmelCase =[file for file in files if n_ not in file]
else:
__UpperCAmelCase =[file for file in files if n_identifier not in file]
__UpperCAmelCase =ignore_files or []
ignore_files.append("""__init__.py""" )
__UpperCAmelCase =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __SCREAMING_SNAKE_CASE )
if only_modules:
__UpperCAmelCase =file.split(""".""" )[0]
try:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _a ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""modeling"""
__UpperCAmelCase =[
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""tokenization"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""configuration"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> Tuple:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase =Path("""docs/source""" )
__UpperCAmelCase =["""favicon.ico"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
| 68
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : str=2 , snake_case__ : Dict=3 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[Any]=2 , snake_case__ : Optional[int]=7 , snake_case__ : Optional[int]=True , snake_case__ : str=True , snake_case__ : Dict=True , snake_case__ : int=True , snake_case__ : Optional[int]=9_9 , snake_case__ : str=3_6 , snake_case__ : Any=2 , snake_case__ : int=4 , snake_case__ : str=3_7 , snake_case__ : Any="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Dict=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : Tuple=1_6 , snake_case__ : Any=2 , snake_case__ : List[str]=0.02 , snake_case__ : Union[str, Any]=6 , snake_case__ : str=6 , snake_case__ : Union[str, Any]=3 , snake_case__ : int=4 , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=1_0_0_0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = coordinate_size
SCREAMING_SNAKE_CASE = shape_size
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE = text_seq_length
SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE = self.text_seq_length + self.image_seq_length
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
SCREAMING_SNAKE_CASE = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE = bbox[i, j, 3]
SCREAMING_SNAKE_CASE = bbox[i, j, 1]
SCREAMING_SNAKE_CASE = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE = bbox[i, j, 2]
SCREAMING_SNAKE_CASE = bbox[i, j, 0]
SCREAMING_SNAKE_CASE = tmp_coordinate
SCREAMING_SNAKE_CASE = tf.constant(snake_case__ )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase ( self : Dict , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFLayoutLMvaModel(config=snake_case__ )
# text + image
SCREAMING_SNAKE_CASE = model(snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , training=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE = model({'pixel_values': pixel_values} , training=snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase ( self : str , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFLayoutLMvaForSequenceClassification(config=snake_case__ )
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFLayoutLMvaForTokenClassification(config=snake_case__ )
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = TFLayoutLMvaForQuestionAnswering(config=snake_case__ )
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__UpperCamelCase =(
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
__UpperCamelCase =(
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
if getattr(snake_case__ , 'hf_compute_loss' , snake_case__ ):
# The number of elements in the loss should be the same as the number of elements in the label
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
SCREAMING_SNAKE_CASE = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=snake_case__ )[0]
]
SCREAMING_SNAKE_CASE = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
SCREAMING_SNAKE_CASE = prepared_for_class.pop('input_ids' )
SCREAMING_SNAKE_CASE = model(snake_case__ , **snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
SCREAMING_SNAKE_CASE = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
SCREAMING_SNAKE_CASE = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
SCREAMING_SNAKE_CASE = -1_0_0
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , **snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
# Get keys that were added with the _prepare_for_class function
SCREAMING_SNAKE_CASE = prepared_for_class.keys() - inputs_dict.keys()
SCREAMING_SNAKE_CASE = inspect.signature(model.call ).parameters
SCREAMING_SNAKE_CASE = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
SCREAMING_SNAKE_CASE = {0: 'input_ids'}
for label_key in label_keys:
SCREAMING_SNAKE_CASE = signature_names.index(snake_case__ )
SCREAMING_SNAKE_CASE = label_key
SCREAMING_SNAKE_CASE = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
SCREAMING_SNAKE_CASE = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
SCREAMING_SNAKE_CASE = prepared_for_class[value]
SCREAMING_SNAKE_CASE = tuple(snake_case__ )
# Send to model
SCREAMING_SNAKE_CASE = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
def UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = TFLayoutLMvaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __lowerCAmelCase ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=snake_case__ ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=snake_case__ , return_tensors='tf' ).pixel_values
SCREAMING_SNAKE_CASE = tf.constant([[1, 2]] )
SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
SCREAMING_SNAKE_CASE = model(input_ids=snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , snake_case__ )
SCREAMING_SNAKE_CASE = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1E-4 ) )
| 701
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """print, but with an exclusive flock on this file, so that prints from
    concurrent processes don't interleave"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
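# A healthy run prints one line per process, e.g. (illustrative output, hostnames
# and world size depend on your launch configuration):
#   [node1-0] is OK (global rank: 0/8)
# plus a single pt/cuda/nccl version line from global rank 0.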
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    """Recursively build the binary digit string of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert an integer-like string to its '0b'-prefixed binary representation."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
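# Illustrative calls (using the helpers above):
#   main("8")  -> "0b1000"
#   main("-7") -> "-0b111"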
'''simple docstring'''
def catalan_number(number: int) -> int:
    """Return the ``number``-th Catalan number (1-indexed, so catalan_number(1) == 1)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
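# The loop in catalan_number above applies the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1),
# so catalan_number(1) through catalan_number(6) yield 1, 1, 2, 5, 14, 42.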
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
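# Typical invocations (illustrative; subcommand availability depends on the installed version):
#   transformers-cli env                          # collect environment info for bug reports
#   transformers-cli download bert-base-uncased   # fetch a model into the local cache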
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the image processor's logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
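    # Illustrative check of the logic above (assuming size = {"shortest_edge": 18}):
    # a 30x60 PIL image has w < h, so it resizes to height int(18 * 60 / 30) = 36 and width 18.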
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation." )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
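# Note: per the logic exercised above, is_small_dataset returns True only when a positive
# dataset_size is strictly below IN_MEMory_MAX_SIZE; a max size of 0 (the default) or a
# missing dataset_size always yields False.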
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
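# Hypothetical usage sketch (the checkpoint name is illustrative, not part of this file):
#   from transformers import AutoModelForSequenceClassification, AutoTokenizer
#   model = AutoModelForSequenceClassification.from_pretrained("sgugger/finetuned-bert-mrpc")
#   tokenizer = AutoTokenizer.from_pretrained("sgugger/finetuned-bert-mrpc")
#   pipe = PairClassificationPipeline(model=model, tokenizer=tokenizer)
#   pipe("I love you", second_text="I hate you")  # -> {"label": ..., "score": ..., "logits": [...]}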
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
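# Example invocation (paths and script name are illustrative):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned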
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orginal_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orginal_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
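# The triple loop above runs in O(v^3) time with O(v^2) space: after the k-th outer
# iteration, dist[i][j] holds the shortest i -> j path that only uses intermediate
# vertices from {0, ..., k}, so the final matrix contains all-pairs shortest paths.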
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Find the numerator of the fraction immediately to the left of
    numerator/denominator among reduced proper fractions with denominators
    up to ``limit`` (Project Euler problem 71)."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
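# Sanity check with a small limit: for 3/7 with limit=8 the closest smaller fraction
# is 2/5, so solution(3, 7, 8) == 2.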
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
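# Example: the proper divisors of 12 are 1, 2, 3, 4 and 6, so sum_of_divisors(12) == 16.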
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
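# Other gates follow the same tuple-counting pattern; a hypothetical NAND built on and_gate:
#   def nand_gate(input_1: int, input_2: int) -> int:
#       return int(not and_gate(input_1, input_2))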
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = StableDiffusionInpaintPipeline.from_pretrained(
_a , torch_dtype=torch.floataa , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCAmelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = PNDMScheduler.from_pretrained(_a , subfolder='''scheduler''' )
__a = StableDiffusionInpaintPipeline.from_pretrained(
_a , safety_checker=_a , scheduler=_a , torch_dtype=torch.floataa , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type='''np''' , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
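# --- Hedged sketch (not part of the test file above): the peak-VRAM
# measurement pattern the offloading test relies on. `run_pipeline` is a
# hypothetical stand-in for any CUDA workload; a CUDA device is required.
def _measure_peak_vram(run_pipeline):
    import torch

    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    run_pipeline()  # e.g. a diffusers pipeline call with cpu offload enabled
    return torch.cuda.max_memory_allocated()  # bytes at the high-water mark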
| 695
|
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : bool = False
__UpperCAmelCase : float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCAmelCase ( self ):
        # Pass non-default GradScaler kwargs through the handler and check they are applied.
__a = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
__a = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
__a = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCAmelCase ( self ):
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
lowercase_ = Accelerator(kwargs_handlers=[ddp_scaler])
lowercase_ = torch.nn.Linear(1_0_0, 2_0_0)
lowercase_ = accelerator.prepare(model)
# Check the values changed in kwargs
lowercase_ = ""
lowercase_ = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
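# --- Hedged sketch (an assumption, not Accelerate's actual implementation) of
# the `to_kwargs` contract tested above: return only the fields whose values
# differ from the dataclass defaults.
from dataclasses import dataclass, fields


@dataclass
class _SketchKwargsHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        defaults = self.__class__()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(defaults, f.name)
        }


# _SketchKwargsHandler(a=2).to_kwargs() == {"a": 2}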
| 695
| 1
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase__ ( ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=_lowerCamelCase , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=_lowerCamelCase , default=5 )
parser.add_argument("--batch_size" , type=_lowerCamelCase , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=_lowerCamelCase , default=1 )
parser.add_argument("--freeze" , type=_lowerCamelCase , default=_lowerCamelCase )
parser.add_argument("--learning_rate" , type=_lowerCamelCase , default=5e-4 )
parser.add_argument("--seed" , type=_lowerCamelCase , default=0 )
parser.add_argument("--lr_scheduler_type" , type=_lowerCamelCase , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=_lowerCamelCase , default=10 )
parser.add_argument("--weight_decay" , type=_lowerCamelCase , default=0.01 )
parser.add_argument("--output_dir" , type=_lowerCamelCase , default="./results" )
return parser.parse_args()
snake_case__ : Tuple = load('accuracy')
def lowerCamelCase__ ( _lowerCamelCase ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase =eval_pred
_UpperCAmelCase =np.argmax(_lowerCamelCase , axis=1 )
return metric.compute(predictions=_lowerCamelCase , references=_lowerCamelCase )
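# --- Hedged mini-check of the metric computation above: take the argmax over
# each row of the logits, then score accuracy against the references (toy values).
def _compute_metrics_example():
    logits = np.array([[0.1, 0.9], [0.8, 0.2]])
    preds = np.argmax(logits, axis=1)
    assert preds.tolist() == [1, 0]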
class _a ( A__ ):
"""simple docstring"""
def __init__( self , _snake_case ):
super().__init__()
_UpperCAmelCase =trainer
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , **_snake_case ):
if control.should_evaluate:
_UpperCAmelCase =deepcopy(_snake_case )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def lowerCamelCase__ ( ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase =get_args()
set_seed(args.seed )
_UpperCAmelCase =load_dataset("codeparrot/codecomplex" , split="train" )
_UpperCAmelCase =dataset.train_test_split(test_size=0.2 )
_UpperCAmelCase =train_test["test"].train_test_split(test_size=0.5 )
_UpperCAmelCase =DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_UpperCAmelCase =AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase =tokenizer.eos_token
_UpperCAmelCase =AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_UpperCAmelCase =model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_UpperCAmelCase =False
_UpperCAmelCase =ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(_lowerCamelCase ):
_UpperCAmelCase =tokenizer(example["src"] , truncation=_lowerCamelCase , max_length=1024 )
        _UpperCAmelCase =labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_UpperCAmelCase =train_test_validation.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=train_test_validation["train"].column_names , )
_UpperCAmelCase =DataCollatorWithPadding(tokenizer=_lowerCamelCase )
_UpperCAmelCase =TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
_UpperCAmelCase =Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
print("Training..." )
trainer.add_callback(CustomCallback(_lowerCamelCase ) )
trainer.train()
if __name__ == "__main__":
main()
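# --- Hedged mini-example of the ClassLabel mapping used in `tokenize` above:
# `str2int` turns a complexity label string into its integer class id. The
# label names below are illustrative, not the real codeparrot/codecomplex set.
def _classlabel_example():
    sketch_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
    assert sketch_labels.str2int("linear") == 1
    assert sketch_labels.int2str(2) == "quadratic"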
| 710
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
snake_case__ : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model'}
snake_case__ : List[Any] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
snake_case__ : str = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
snake_case__ : Tuple = '▁'
class _a ( A__ ):
"""simple docstring"""
snake_case =VOCAB_FILES_NAMES
snake_case =PRETRAINED_VOCAB_FILES_MAP
snake_case =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case =["""input_ids""", """attention_mask"""]
def __init__( self , _snake_case , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case = None , **_snake_case , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase =AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
_UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
_UpperCAmelCase =vocab_file
_UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
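        # Pin the special tokens to fixed ids 0-3 (the fairseq convention); the
        # id/token conversion methods below consult this map before falling back
        # to the SentencePiece model.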
_UpperCAmelCase ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_UpperCAmelCase =len(self.sp_model ) - 1
_UpperCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase =[self.cls_token_id]
_UpperCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None , _snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ):
_UpperCAmelCase =[self.sep_token_id]
_UpperCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ={self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase =self.sp_model.PieceToId(_snake_case )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
_UpperCAmelCase =[]
_UpperCAmelCase =""
_UpperCAmelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_snake_case ) + token
_UpperCAmelCase =True
_UpperCAmelCase =[]
else:
current_sub_tokens.append(_snake_case )
_UpperCAmelCase =False
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
def __getstate__( self ):
_UpperCAmelCase =self.__dict__.copy()
_UpperCAmelCase =None
return state
def __setstate__( self , _snake_case ):
_UpperCAmelCase =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase ={}
_UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ):
if not os.path.isdir(_snake_case ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase =os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , "wb" ) as fi:
_UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
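# --- Hedged illustration (standalone; no SentencePiece model needed) of the
# special-token layouts the two builder methods above produce, using the fixed
# fairseq ids declared in __init__ (<s>=0, </s>=2). Piece ids are invented.
def _special_token_layout_example():
    cls_id, sep_id = 0, 2
    ids_a = [10, 11]  # hypothetical piece ids for sentence A
    ids_b = [12]      # hypothetical piece ids for sentence B
    single = [cls_id] + ids_a + [sep_id]                           # <s> A </s>
    pair = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]  # <s> A </s></s> B </s>
    assert single == [0, 10, 11, 2]
    assert pair == [0, 10, 11, 2, 2, 12, 2]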
| 592
| 0
|
"""simple docstring"""
from __future__ import annotations
def A__ ( __lowerCamelCase, __lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = False, ):
"""simple docstring"""
_lowerCAmelCase = cipher_alphabet or [chr(__lowerCamelCase ) for i in range(9_7, 1_2_3 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
_lowerCAmelCase = {
'a': 0.0_8497,
'b': 0.0_1492,
'c': 0.0_2202,
'd': 0.0_4253,
'e': 0.1_1162,
'f': 0.0_2228,
'g': 0.0_2015,
'h': 0.0_6094,
'i': 0.0_7546,
'j': 0.0_0153,
'k': 0.0_1292,
'l': 0.0_4025,
'm': 0.0_2406,
'n': 0.0_6749,
'o': 0.0_7507,
'p': 0.0_1929,
'q': 0.0_0095,
'r': 0.0_7587,
's': 0.0_6327,
't': 0.0_9356,
'u': 0.0_2758,
'v': 0.0_0978,
'w': 0.0_2560,
'x': 0.0_0150,
'y': 0.0_1994,
'z': 0.0_0077,
}
else:
# Custom frequencies dictionary
_lowerCAmelCase = frequencies_dict
if not case_sensitive:
_lowerCAmelCase = ciphertext.lower()
# Chi squared statistic values
_lowerCAmelCase = {}
# cycle through all of the shifts
for shift in range(len(__lowerCamelCase ) ):
_lowerCAmelCase = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
_lowerCAmelCase = (alphabet_letters.index(letter.lower() ) - shift) % len(
__lowerCamelCase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
_lowerCAmelCase = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
_lowerCAmelCase = letter.lower()
if letter in frequencies:
                # Get the number of times the letter occurs in the message
                _lowerCAmelCase = decrypted_with_shift.lower().count(__lowerCamelCase )
                # Get the expected number of times the letter should appear based
                # on letter frequencies
_lowerCAmelCase = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
_lowerCAmelCase = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    _lowerCAmelCase = decrypted_with_shift.count(__lowerCamelCase )
                    # Get the expected number of times the letter should appear based
                    # on letter frequencies
_lowerCAmelCase = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
_lowerCAmelCase = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
_lowerCAmelCase = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(__lowerCamelCase ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key, )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
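# --- Worked micro-example of the scoring loop above (two-letter frequency
# table for illustration; the real function uses the full English table). As
# in the code, every character of a candidate decryption contributes
# ((occurrences - expected) ** 2) / expected to that shift's statistic.
def _chi_squared_scoring_example() -> float:
    frequencies = {"a": 0.08, "b": 0.015}
    decrypted_with_shift = "aab"
    chi_squared_statistic = 0.0
    for letter in decrypted_with_shift:
        occurrences = decrypted_with_shift.count(letter)
        expected = frequencies[letter] * occurrences
        chi_squared_statistic += (occurrences - expected) ** 2 / expected
    return chi_squared_statistic  # lower totals look more like English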
| 589
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def A__ ( __lowerCamelCase ):
"""simple docstring"""
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
_lowerCAmelCase = key.replace('heads.cmd.mim_head.cls.predictions', 'mmm_image_head' )
_lowerCAmelCase = key.replace('heads.cmd.mlm_head.cls.predictions', 'mmm_text_head' )
_lowerCAmelCase = key.replace('heads.cmd.itm_head.cls', 'itm_head' )
_lowerCAmelCase = key.replace('heads.cmd.itm_head.pooler', 'itm_head.pooler' )
_lowerCAmelCase = key.replace('heads.cmd.clip_head.logit_scale', 'flava.logit_scale' )
_lowerCAmelCase = key.replace('heads.fairseq_mlm.cls.predictions', 'mlm_head' )
_lowerCAmelCase = key.replace('heads.imagenet.mim_head.cls.predictions', 'mim_head' )
_lowerCAmelCase = key.replace('mm_text_projection', 'flava.text_to_mm_projection' )
_lowerCAmelCase = key.replace('mm_image_projection', 'flava.image_to_mm_projection' )
_lowerCAmelCase = key.replace('image_encoder.module', 'flava.image_model' )
_lowerCAmelCase = key.replace('text_encoder.module', 'flava.text_model' )
_lowerCAmelCase = key.replace('mm_encoder.module.encoder.cls_token', 'flava.multimodal_model.cls_token' )
_lowerCAmelCase = key.replace('mm_encoder.module', 'flava.multimodal_model' )
_lowerCAmelCase = key.replace('text_projection', 'flava.text_projection' )
_lowerCAmelCase = key.replace('image_projection', 'flava.image_projection' )
_lowerCAmelCase = value.float()
for key, value in codebook_state_dict.items():
_lowerCAmelCase = value
return upgrade
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None ):
"""simple docstring"""
if config_path is not None:
_lowerCAmelCase = FlavaConfig.from_pretrained(__lowerCamelCase )
else:
_lowerCAmelCase = FlavaConfig()
_lowerCAmelCase = FlavaForPreTraining(__lowerCamelCase ).eval()
_lowerCAmelCase = convert_dalle_checkpoint(__lowerCamelCase, __lowerCamelCase, save_checkpoint=__lowerCamelCase )
if os.path.exists(__lowerCamelCase ):
_lowerCAmelCase = torch.load(__lowerCamelCase, map_location='cpu' )
else:
_lowerCAmelCase = torch.hub.load_state_dict_from_url(__lowerCamelCase, map_location='cpu' )
_lowerCAmelCase = upgrade_state_dict(__lowerCamelCase, __lowerCamelCase )
hf_model.load_state_dict(__lowerCamelCase )
_lowerCAmelCase = hf_model.state_dict()
_lowerCAmelCase = count_parameters(__lowerCamelCase )
_lowerCAmelCase = count_parameters(__lowerCamelCase ) + count_parameters(__lowerCamelCase )
assert torch.allclose(__lowerCamelCase, __lowerCamelCase, atol=1e-3 )
hf_model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
a__ : str = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
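# --- Hedged micro-example of the key-renaming pattern in upgrade_state_dict
# above: iterate over checkpoint items, drop keys that must not be carried
# over, and rewrite prefixes with str.replace. The keys below are invented.
def _key_rename_example():
    old_state = {
        'text_encoder.module.weight': torch.zeros(2),
        'text_encoder.embeddings.weight': torch.zeros(2),  # double copied, dropped
    }
    new_state = {}
    for key, value in old_state.items():
        if 'text_encoder.embeddings' in key:
            continue
        new_state[key.replace('text_encoder.module', 'flava.text_model')] = value.float()
    assert list(new_state) == ['flava.text_model.weight']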
| 589
| 1
|
import argparse
from collections import defaultdict
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ) -> Tuple:
__UpperCamelCase : Any = f'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(__lowerCAmelCase , """r""" ) as f:
__UpperCamelCase : Optional[Any] = f.readlines()
__UpperCamelCase : List[Any] = f'class {class_name}('
__UpperCamelCase : Union[str, Any] = f'{4 * " "}def {test_name}('
__UpperCamelCase : Tuple = f'{8 * " "}{correct_line.split()[0]}'
__UpperCamelCase : Tuple = f'{16 * " "}{correct_line.split()[0]}'
__UpperCamelCase : Tuple = False
__UpperCamelCase : List[str] = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Any = False
__UpperCamelCase : Dict = 0
__UpperCamelCase : Tuple = 0
__UpperCamelCase : List[Any] = []
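    # Scan the file line by line, tracking entry into the target class, then the
    # target test, until the matching occurrence of the line to replace is found.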
for line in lines:
if line.startswith(__lowerCAmelCase ):
__UpperCamelCase : Any = True
elif in_class and line.startswith(__lowerCAmelCase ):
__UpperCamelCase : Tuple = True
elif in_class and in_func and (line.startswith(__lowerCAmelCase ) or line.startswith(__lowerCAmelCase )):
__UpperCamelCase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
__UpperCamelCase : Dict = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__UpperCamelCase : str = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'{spaces * " "}{correct_line}' )
__UpperCamelCase : Optional[int] = False
else:
new_lines.append(__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : str=None ) -> Dict:
if fail is not None:
with open(__lowerCAmelCase , """r""" ) as f:
__UpperCamelCase : Any = {l.strip() for l in f.readlines()}
else:
__UpperCamelCase : int = None
with open(__lowerCAmelCase , """r""" ) as f:
__UpperCamelCase : str = f.readlines()
__UpperCamelCase : Tuple = defaultdict(__lowerCAmelCase )
for line in correct_lines:
__UpperCamelCase : Dict = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
UpperCamelCase = parser.parse_args()
main(args.correct_filename, args.fail_filename)
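# --- Hedged illustration of the inputs main() above expects (values invented):
# each line of --correct_filename carries four ';'-separated fields, and the
# optional --fail_filename lists failing tests as file::class::test.
def _input_format_example():
    entry = "tests/test_foo.py;FooTester;test_bar;expected = np.array([0.1])"
    file, class_name, test_name, correct_line = entry.split(";")
    failure = "::".join([file, class_name, test_name])
    assert failure == "tests/test_foo.py::FooTester::test_bar"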
| 701
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
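# --- A hedged alternative to the availability probe above: attempt the import
# directly and raise an actionable error only if rich is missing (equivalent
# behaviour, no is_rich_available helper needed).
def _install_rich_traceback():
    try:
        from rich.traceback import install
    except ImportError as exc:
        raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`') from exc
    install(show_locals=False)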
| 515
| 0
|