| column | dtype | values |
|---|---|---|
| code | string | length 82 to 53.2k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | length 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |

code:
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for an LRU cache."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert the given node just before the rear sentinel (most recently used)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(
        self, node: DoubleLinkedListNode[T, U]
    ) -> DoubleLinkedListNode[T, U] | None:
        """Unlink the given node and return it, or None if it is not in the list."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache holding up to `capacity` entries."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for `key` and mark it most recently used, or None on a miss."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Store `value` under `key`, evicting the least recently used entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0,
                # explained to the type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
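A short usage sketch for the cache above (illustrative, not part of the dataset row); it relies only on the class names restored in this cell:

```python
cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
assert cache.get(1) == 1   # hit; key 1 becomes most recently used
cache.put(3, 3)            # evicts key 2, the least recently used
assert cache.get(2) is None
print(cache)  # CacheInfo(hits=1, misses=1, capacity=2, current size=2)

@LRUCache.decorator(100)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)

print(fib(20), fib.cache_info())  # 6765 plus the per-function cache stats
```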
code_codestyle: 652

style_context:
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return the Liouville lambda of `number`: -1 if it has an odd number of
    prime factors (counted with multiplicity), +1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
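A quick parity check (assuming `prime_factors` counts factors with multiplicity, as in the repository this snippet comes from):

```python
assert liouville_lambda(4) == 1    # 4 = 2 * 2, two factors
assert liouville_lambda(8) == -1   # 8 = 2 * 2 * 2, three factors
assert liouville_lambda(11) == -1  # prime, one factor
```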
style_context_codestyle: 89
label: 0

code:
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if the parameter is not quantized yet, quantize it on device 0 and offload the
    # quantized weight together with its SCB statistics; otherwise offload as given
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
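A hedged usage sketch for `load_and_quantize_model`; the model class and checkpoint path are placeholders and a CUDA device is required, so the calls are left commented:

```python
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# with init_empty_weights():
#     empty_model = MyModel()  # hypothetical nn.Module built on the meta device
# model = load_and_quantize_model(
#     empty_model,
#     bnb_quantization_config=bnb_config,
#     weights_location="path/to/checkpoint",  # placeholder folder holding the weights
#     device_map="auto",
# )
```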
code_codestyle: 241

style_context:
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)


TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
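A hedged sketch of driving the converter programmatically instead of via argparse; the tokenizer and checkpoint names are illustrative:

```python
# from transformers.convert_slow_tokenizers_checkpoints_to_fast import convert_slow_checkpoint_to_fast
#
# convert_slow_checkpoint_to_fast(
#     tokenizer_name="BertTokenizer",       # must be a key of TOKENIZER_CLASSES
#     checkpoint_name="bert-base-uncased",  # illustrative checkpoint id
#     dump_path="./fast_tokenizers",
#     force_download=False,
# )
```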
style_context_codestyle: 241
label: 1

code:
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
code_codestyle: 34

style_context:
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pt_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pt_file)
style_context_codestyle: 34
label: 1

code:
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
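A small self-contained check of the 8-directional island count; the grid values are illustrative:

```python
grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
matrix = Matrix(5, 5, grid)
print(matrix.count_islands())  # 5, since diagonal neighbours join an island
```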
code_codestyle: 697

style_context:
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
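A minimal sketch of what the lazy module above enables (assumes transformers with torch installed; left commented since it only works inside the installed package):

```python
# from transformers import FocalNetConfig, FocalNetModel  # names resolve lazily on first access
#
# config = FocalNetConfig()      # default config, no weights downloaded
# model = FocalNetModel(config)  # randomly initialised FocalNet
```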
style_context_codestyle: 697
label: 1

code:
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
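A hedged loading sketch for this community pipeline; the `custom_pipeline` id is an assumption on my part, and the call downloads four checkpoints, so it is left commented:

```python
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4",
#     custom_pipeline="stable_diffusion_comparison",  # assumed community pipeline id
# )
# output = pipe(prompt="an astronaut riding a horse")
# images = output.images  # one image per checkpoint, v1-1 through v1-4
```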
code_codestyle: 264

style_context:
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            # shift pixel values by half the scale range before rescaling
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
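A hedged usage sketch with a dummy video; shapes follow the defaults above (shortest edge 256, then a 224x224 center crop), and it is left commented since it needs an installed transformers:

```python
# from transformers import VivitImageProcessor
# import numpy as np
#
# processor = VivitImageProcessor()
# video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
# batch = processor(video, return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)
```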
style_context_codestyle: 264
label: 1

code:
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase__( UpperCAmelCase_ , unittest.TestCase):
UpperCAmelCase__ : Dict = ShapEPipeline
UpperCAmelCase__ : List[Any] = ['prompt']
UpperCAmelCase__ : Tuple = ['prompt']
UpperCAmelCase__ : Union[str, Any] = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCAmelCase__ : int = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
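    # Note: every component above is a tiny, seeded stand-in model, so the fast tests can run on CPU in seconds.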
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 707
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2,
        )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )
        # start training
        trainer.train()
| 80
| 0
|
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
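# Minimal usage sketch (added for illustration; the coefficient values below are
# arbitrary placeholders, not a designed filter response):
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.1, 0.3], [0.1, 0.2, 0.1])
    print([filt.process(x) for x in (0.0, 1.0, 0.5, -0.2)])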
| 40
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 40
| 1
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 305
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 305
| 1
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar `item` is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Select a second parent and generate new children for the population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match with `target` is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
snake_case_ : Optional[int] = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
snake_case_ : int = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
snake_case_ , snake_case_ , snake_case_ : str = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 212
|
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return the list of all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: find the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
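# Performance note: `sol in primes` above is a linear scan over the list; building
# a `set(primes)` once up front and testing membership against it would make each
# lookup O(1) on average.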
if __name__ == "__main__":
print(F'''{solution() = }''')
| 609
| 0
|
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, self.n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
            return out
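# Minimal smoke test for the module above (added for illustration; the sizes are
# arbitrary, chosen only so the module builds and runs):
if __name__ == "__main__":
    softmax = ProjectedAdaptiveLogSoftmax(n_token=10, d_embed=8, d_proj=8, cutoffs=[5])
    hidden = torch.randn(2, 4, 8)
    labels = torch.randint(0, 10, (2, 4))
    nll = softmax(hidden, labels)  # per-token negative log-likelihoods, shape (2 * 3,)
    print(nll.shape)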
| 323
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
_lowerCamelCase = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
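# e.g. get_pairs(("h", "e", "y</w>")) -> {("h", "e"), ("e", "y</w>")}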
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 323
| 1
|
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 95
|
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square via math.sqrt (fast, but subject to float rounding)."""
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search (exact integer arithmetic)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
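# Quick sanity checks (illustrative):
# perfect_square(9) -> True, perfect_square(10) -> False
# perfect_square_binary_search(16) -> True, perfect_square_binary_search(15) -> False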
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673
| 0
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None) -> None:
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)
    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")
    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)
    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url
    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
def A_ ( self : int , __a : Optional[Any] , *__a : int ) -> List[str]:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__snake_case : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__snake_case : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__a , __a ):
return self.create_dummy_data_dict(__a , __a )
elif isinstance(__a , (list, tuple) ):
return self.create_dummy_data_list(__a , __a )
else:
return self.create_dummy_data_single(__a , __a )
def A_ ( self : List[Any] , __a : Union[str, Any] , *__a : Tuple ) -> Optional[Any]:
'''simple docstring'''
return self.download_and_extract(__a )
def A_ ( self : Dict , __a : Optional[int] , __a : int ) -> Optional[int]:
'''simple docstring'''
return self.download_and_extract(__a )
def A_ ( self : Tuple , __a : int , *__a : Optional[Any] , **__a : Any ) -> List[str]:
'''simple docstring'''
return path
def A_ ( self : Optional[int] ) -> int:
'''simple docstring'''
return {}
def A_ ( self : Optional[Any] , __a : Dict , __a : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : Dict = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__a , __a ):
for single_url in single_urls:
download_callback(__a )
else:
__snake_case : int = single_urls
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__a , __a ):
__snake_case : Optional[int] = [os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) ) for x in single_urls]
else:
__snake_case : str = single_urls
__snake_case : Union[str, Any] = os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) )
__snake_case : Optional[Any] = value
# make sure that values are unique
if all(isinstance(__a , __a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__snake_case : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def A_ ( self : Union[str, Any] , __a : Optional[Any] , __a : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__snake_case : Tuple = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , __a ) ) for url in data_url )
__snake_case : Union[str, Any] = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__snake_case : str = [data_url[0]] * len(__a )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__snake_case : Optional[int] = os.path.join(__a , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(__a )
return dummy_data_list
def A_ ( self : List[str] , __a : Tuple , __a : List[Any] ) -> Any:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__snake_case : Dict = os.path.join(__a , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(__a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def A_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
def A_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
pass
def A_ ( self : Optional[int] , __a : Dict ) -> List[Any]:
'''simple docstring'''
def _iter_archive_members(__a : Tuple ):
# this preserves the order of the members inside the ZIP archive
__snake_case : Union[str, Any] = Path(self.dummy_file ).parent
__snake_case : Tuple = path.relative_to(__a )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__snake_case : Optional[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__a )
__snake_case : Optional[int] = Path(__a )
__snake_case : Dict = _iter_archive_members(__a ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(__a ).as_posix(), file_path.open('rb' )
def A_ ( self : Tuple , __a : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(__a , __a ):
__snake_case : List[str] = [paths]
for path in paths:
if os.path.isfile(__a ):
if os.path.basename(__a ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__a ):
if os.path.basename(__a ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(__a ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(__a , __a )
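# Usage sketch (added; the dataset name, version, and URL below are placeholders,
# not from the original file — resolving real dummy data requires the accompanying
# `datasets/<name>/dummy/.../dummy_data.zip` layout):
#
#   mock_dl_manager = MockDownloadManager(
#       "my_dataset", config=None, version="1.0.0", use_local_dummy_data=True
#   )
#   # dict / list / str URLs are mapped to paths inside the dummy-data folder:
#   mock_dl_manager.download_and_extract({"train": "https://example.com/train.txt"})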
| 124
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 124
| 1
|
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
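if __name__ == "__main__":
    # Added illustration (points chosen ad hoc, not from the original file):
    # three points on the line y = x in the z = 0 plane are collinear,
    # while a bent triple has a non-zero cross product.
    assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 0.0), (2.0, 2.0, 0.0))
    assert not are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 1.0))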
| 382
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 382
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165
|
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(input_shape, rng=None):
    attn_mask = ids_tensor(input_shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 165
| 1
|
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
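# Usage sketch (added; only runnable inside the package that provides
# `.hash_table.HashTable`, and the constructor/insert API below is assumed
# from that base class rather than shown in this file):
#
#   table = HashTableWithLinkedList(size_table=5, charge_factor=2)
#   for value in (10, 20, 30):
#       table.insert_data(value)
#   print(table.balanced_factor())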
| 76
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
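if __name__ == "__main__":
    # Added shape check (values are arbitrary, not from the original file):
    # 4 timesteps embedded into 8 dimensions should yield a (4, 8) array —
    # half sine components and half cosine components per timestep.
    example_timesteps = jnp.arange(4, dtype=jnp.float32)
    example_emb = get_sinusoidal_embeddings(example_timesteps, embedding_dim=8)
    assert example_emb.shape == (4, 8)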
| 76
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 705
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve an ODE dy/dx = f(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
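    # Added illustration (hypothetical ODE, not from the original file):
    # dy/dx = y with y(0) = 1 approximates e^x; integrate from 0 to 1.
    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    print(approx[-1])  # ~2.714 at this step size, close to e ~ 2.71828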
| 182
| 0
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
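# Usage sketch (added; the checkpoint id is illustrative and the call pattern
# follows the standard processor API rather than anything in this file):
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")
#   # for TTS training, also pass `audio_target=` to populate `labels`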
| 321
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662
| 0
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
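# Note (added): `@patch` decorators are applied bottom-up, so the mock for
# `builtins.open` (the decorator closest to the function) is passed as the first
# argument and the `socket.socket` mock as the second — hence the (file, sock)
# parameter order above.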
| 720
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for SHA1 hashing a bytestring."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
| 72
| 0
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
A_: List[str] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
A_: Tuple = None
def __lowerCAmelCase ( ):
"""simple docstring"""
_lowercase = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" ,metavar="""data.json""" ,help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" ,metavar="""pred.json""" ,help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" ,"""-o""" ,metavar="""eval.json""" ,help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" ,"""-n""" ,metavar="""na_prob.json""" ,help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" ,"""-t""" ,type=_UpperCamelCase ,default=1.0 ,help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" ,)
parser.add_argument(
"""--out-image-dir""" ,"""-p""" ,metavar="""out_images""" ,default=_UpperCamelCase ,help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" ,"""-v""" ,action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __lowerCAmelCase ( _A ):
"""simple docstring"""
_lowercase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_lowercase = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
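# Illustrative check, added for exposition (not part of the official script):
# the article "the" is stripped during normalization, so the gold answer
# "the cat sat" tokenizes to ["cat", "sat"] while the prediction
# "cat sat down" tokenizes to ["cat", "sat", "down"]. Two tokens overlap,
# giving precision = 2/3, recall = 2/2 = 1.0 and F1 = 0.8.
assert abs(compute_f1("the cat sat", "cat sat down") - 0.8) < 1e-9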
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    # Sweep questions in order of increasing no-answer probability; the running
    # score trades answered-question credit against no-answer credit, and
    # best_thresh records where that total peaks.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 398
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
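# Hedged usage sketch (column names are illustrative): build the frozen template
# with custom columns, then align it with a dataset's features as above:
#
#   task = AutomaticSpeechRecognition(audio_column="file_audio", transcription_column="text")
#   task = task.align_with_features(dataset.features)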
| 639
| 0
|
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 288
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
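# Hedged usage sketch; "CIDAS/clipseg-rd64-refined" is the usual CLIPSeg
# checkpoint, but treat the id as illustrative here:
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")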
| 288
| 1
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
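# A plausible way to launch this check (illustrative; the exact flag values
# depend on the surrounding test harness). torchrun sets the RANK and WORLD_SIZE
# environment variables that main() reads above:
#
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2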
| 228
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the Karras VE scheduler."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
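# Hedged usage sketch (the checkpoint id is illustrative, not a known hub repo):
#
#   pipe = KarrasVePipeline.from_pretrained("some-org/karras-ve-checkpoint")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]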
| 443
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
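# Hedged usage sketch (values are illustrative): because the dataclass extends
# TrainingArguments, the extra knobs are passed alongside the stock ones:
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out", sortish_sampler=True, predict_with_generate=True, label_smoothing=0.1
#   )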
| 309
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Flip two qubits with X gates and measure them on the aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
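# Note: with an X gate applied to each qubit, the state |00> is mapped to |11>
# deterministically, so the printed counts should be {'11': 1000} for the 1000
# shots above. This makes the function a handy smoke test for the simulator.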
| 309
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest released version, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 108
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
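    # Illustrative sanity check, added for exposition: since both sets are defined
    # on the same universe X, the skfuzzy union/intersection above should reduce
    # to element-wise max/min of the membership arrays.
    assert np.allclose(union, np.maximum(young, middle_aged))
    assert np.allclose(intersection, np.minimum(young, middle_aged))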
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
    plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 650
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
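    # The _LazyModule registered above defers the heavy torch/TF imports until a
    # symbol such as ViTMAEModel is first accessed, so importing this package
    # stays cheap when only the config is needed.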
| 709
|
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67
| 0
|
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch
        return written
| 102
|
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the text into sentences, one per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
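# Illustrative usage (assumes the punkt model downloaded above):
#
#   add_newline_to_end_of_each_sentence("Hello world. How are you?")
#   -> "Hello world.\nHow are you?"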
| 617
| 0
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        embed_dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 53
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
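# Hedged launch sketch (script name, paths and model id are illustrative):
#
#   python run_language_modeling.py --output_dir=out --model_type=bert \
#       --model_name_or_path=bert-base-uncased --do_train --train_data_file=train.txt --mlm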
| 53
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Any = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    """Configuration class for LXMERT models."""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500,
        num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5,
        r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67,
        task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True,
        visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
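# A minimal usage sketch (assuming the standard `transformers` config API): the config
# accepts per-stack layer counts, and `num_hidden_layers` above then reflects the three
# LXMERT encoder stacks. Guarded so it never runs on import.
if __name__ == "__main__":
    config = LxmertConfig(l_layers=6, x_layers=3, r_layers=3)
    print(config.num_hidden_layers)  # {'vision': 3, 'cross_encoder': 3, 'language': 6}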
| 682
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    """Helper that builds tiny Nezha configs and inputs for the tests below."""
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128,
        max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
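# A minimal inference sketch for the checkpoint exercised above (assuming the standard
# `transformers` auto classes; "sijunhe/nezha-cn-base" is the hub id already used in
# these tests). Guarded so it only runs when invoked directly.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
    mdl = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
    enc = tok("你好", return_tensors="pt")
    with torch.no_grad():
        hidden = mdl(**enc).last_hidden_state  # (batch, seq_len, 768)
    print(hidden.shape)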
| 682
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
class DeiTImageProcessor(BaseImageProcessor):
    """Image processor with resize, center-crop, rescale and normalize steps."""

    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True,
        crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True, do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
        resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None,
        do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
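# A quick numeric sketch of the rescale + normalize steps above (illustrative values):
# a uint8 intensity of 128 is first scaled by 1/255, then standardized with the 0.5/0.5
# IMAGENET_STANDARD mean/std that this class uses as defaults.
if __name__ == "__main__":
    pixel = np.array(128.0)
    scaled = pixel * (1 / 255)            # ~0.502
    standardized = (scaled - 0.5) / 0.5   # ~0.004
    print(float(scaled), float(standardized))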
| 705
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
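# The `strength` argument above controls how much of the diffusion schedule is re-run on
# the init image. A minimal sketch of the usual bookkeeping (illustrative only, not this
# pipeline's exact internals): strength scales how many denoising steps are applied.
if __name__ == "__main__":
    num_inference_steps, strength = 100, 0.2
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    print(init_timestep, t_start)  # 20 denoising steps, starting at schedule index 80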
| 472
| 0
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next Game of Life generation for a grid of 0/1 cells."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generate a list of images of subsequent Game of Life generations."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
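    # The BLINKER grid defined above is a handy sanity check: a vertical bar of three
    # live cells flips to a horizontal bar after one generation (illustrative print).
    print(new_generation(BLINKER))  # expected: [[0, 0, 0], [1, 1, 1], [0, 0, 0]]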
| 533
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
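# A minimal end-to-end sketch of the processor tested above (assuming the standard
# `transformers` API; the tiny checkpoint id is the one the tests already use):
if __name__ == "__main__":
    sketch_processor = BlipProcessor(
        BlipImageProcessor(), BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
    )
    batch = sketch_processor(text="a photo of a cat", images=Image.new("RGB", (32, 32)))
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']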
| 20
| 0
|
"""simple docstring"""
from __future__ import annotations

from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
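# A quick hand-check of the schedule above: with time slices 17 and 25, P2 (burst 17)
# finishes in the first round-robin pass, P4 (burst 24) in the second, and P1/P3 are
# drained by the final FCFS queue - matching the printed finish sequence. The averaged
# metric below is a small illustrative addition, not part of the original demo.
if __name__ == "__main__":
    avg_waiting = sum(MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])) / 4
    print(f"average waiting time:\t\t{avg_waiting}")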
| 713
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144,
        num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920,
        layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02,
        hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1,
        conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1,
        conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
| 549
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
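# A minimal sketch of the black API used in check_copy_consistency above (black is a
# real dependency of this test; the PY37 target mirrors the mode assumed earlier):
if __name__ == "__main__":
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
    print(black.format_str("x=1", mode=mode))  # "x = 1\n"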
| 40
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
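# A minimal usage sketch (not part of the original file). The tag names below are
# hypothetical, the checkpoint is the public Donut base model, and fetching it
# requires network access.
#
#   from transformers import DonutProcessor
#
#   processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
#   sequence = "<s_menu><s_nm>Latte</s_nm><sep/><s_nm>Mocha</s_nm></s_menu>"
#   processor.token2json(sequence)
#   # -> {"menu": [{"nm": "Latte"}, {"nm": "Mocha"}]}
#
# Nested <s_*>...</s_*> spans become dict keys, and <sep/> separates list items.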
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( UpperCAmelCase_ : int ) -> list[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = [True] * limit
__snake_case : Tuple = False
__snake_case : Union[str, Any] = False
__snake_case : List[str] = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
__snake_case : List[str] = i * 2
while index < limit:
__snake_case : List[str] = False
__snake_case : Union[str, Any] = index + i
__snake_case : str = [2]
for i in range(3 , UpperCAmelCase_ , 2 ):
if is_prime[i]:
primes.append(UpperCAmelCase_ )
return primes
def __UpperCAmelCase ( UpperCAmelCase_ : int = 1_00_00_00 ) -> int:
'''simple docstring'''
__snake_case : Tuple = prime_sieve(UpperCAmelCase_ )
__snake_case : int = 0
__snake_case : str = 0
for i in range(len(UpperCAmelCase_ ) ):
for j in range(i + length , len(UpperCAmelCase_ ) ):
__snake_case : Union[str, Any] = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
__snake_case : List[Any] = j - i
__snake_case : Optional[Any] = sol
return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
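# A quick sanity check for the helper (not part of the original file): the sieve
# excludes `limit` itself, so the primes below 10 are returned.
#
#   assert prime_sieve(10) == [2, 3, 5, 7]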
"""simple docstring"""
import math
def __UpperCAmelCase ( UpperCAmelCase_ : list , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
__snake_case : List[str] = len(UpperCAmelCase_ )
__snake_case : List[Any] = int(math.floor(math.sqrt(UpperCAmelCase_ ) ) )
__snake_case : Any = 0
while arr[min(UpperCAmelCase_ , UpperCAmelCase_ ) - 1] < x:
__snake_case : Tuple = step
step += int(math.floor(math.sqrt(UpperCAmelCase_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__snake_case : Union[str, Any] = prev + 1
if prev == min(UpperCAmelCase_ , UpperCAmelCase_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
_a : str= input("Enter numbers separated by a comma:\n").strip()
_a : int= [int(item) for item in user_input.split(",")]
_a : Optional[Any]= int(input("Enter the number to be searched:\n"))
_a : Tuple= jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
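# A quick self-check (not part of the original demo). Jump search performs
# O(sqrt(n)) block jumps followed by a short linear scan within one block.
#
#   assert jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 55) == 10
#   assert jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 6) == -1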
import json
import os
import unittest
from typing import Tuple

from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
        # which is not the case for Wav2Vec2PhonemeTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __get__( self : List[Any] ,lowercase_ : Any ,lowercase_ : List[str]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
lowerCAmelCase__ : Optional[Any] = '''__cached_''' + self.fget.__name__
lowerCAmelCase__ : Any = getattr(lowercase_ ,lowercase_ ,lowercase_ )
if cached is None:
lowerCAmelCase__ : str = self.fget(lowercase_ )
setattr(lowercase_ ,lowercase_ ,lowercase_ )
return cached
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : int = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}' )
def __SCREAMING_SNAKE_CASE ( A_ ):
if is_torch_fx_proxy(A_ ):
return True
if is_torch_available():
import torch
if isinstance(A_ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A_ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A_ , (jnp.ndarray, Tracer) ):
return True
return isinstance(A_ , np.ndarray )
def __SCREAMING_SNAKE_CASE ( A_ ):
return isinstance(A_ , np.ndarray )
def __SCREAMING_SNAKE_CASE ( A_ ):
return _is_numpy(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import torch
return isinstance(A_ , torch.Tensor )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_torch_available() else _is_torch(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import torch
return isinstance(A_ , torch.device )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_torch_available() else _is_torch_device(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import torch
if isinstance(A_ , A_ ):
if hasattr(A_ , A_ ):
lowerCAmelCase__ : int = getattr(A_ , A_ )
else:
return False
return isinstance(A_ , torch.dtype )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_torch_available() else _is_torch_dtype(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import tensorflow as tf
return isinstance(A_ , tf.Tensor )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_tf_available() else _is_tensorflow(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(A_ , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(A_ )
return type(A_ ) == tf.Tensor
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
import jax.numpy as jnp # noqa: F811
return isinstance(A_ , jnp.ndarray )
def __SCREAMING_SNAKE_CASE ( A_ ):
return False if not is_flax_available() else _is_jax(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
if isinstance(A_ , (dict, UserDict) ):
return {k: to_py_obj(A_ ) for k, v in obj.items()}
elif isinstance(A_ , (list, tuple) ):
return [to_py_obj(A_ ) for o in obj]
elif is_tf_tensor(A_ ):
return obj.numpy().tolist()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(A_ ):
return np.asarray(A_ ).tolist()
elif isinstance(A_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __SCREAMING_SNAKE_CASE ( A_ ):
if isinstance(A_ , (dict, UserDict) ):
return {k: to_numpy(A_ ) for k, v in obj.items()}
elif isinstance(A_ , (list, tuple) ):
return np.array(A_ )
elif is_tf_tensor(A_ ):
return obj.numpy()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(A_ ):
return np.asarray(A_ )
else:
return obj
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[int] = fields(self )
# Safety and consistency checks
if not len(lowercase_ ):
raise ValueError(F'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' )
lowerCAmelCase__ : str = getattr(self ,class_fields[0].name )
lowerCAmelCase__ : List[str] = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowercase_ ):
if isinstance(lowercase_ ,lowercase_ ):
lowerCAmelCase__ : str = first_field.items()
lowerCAmelCase__ : List[str] = True
else:
try:
lowerCAmelCase__ : Union[str, Any] = iter(lowercase_ )
lowerCAmelCase__ : int = True
except TypeError:
lowerCAmelCase__ : Dict = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowercase_ ):
if (
not isinstance(lowercase_ ,(list, tuple) )
or not len(lowercase_ ) == 2
or not isinstance(element[0] ,lowercase_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCAmelCase__ : Tuple = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self ,element[0] ,element[1] )
if element[1] is not None:
lowerCAmelCase__ : Dict = element[1]
elif first_field is not None:
lowerCAmelCase__ : Any = first_field
else:
for field in class_fields:
lowerCAmelCase__ : Any = getattr(self ,field.name )
if v is not None:
lowerCAmelCase__ : List[str] = v
def __delitem__( self : List[str] ,*lowercase_ : List[str] ,**lowercase_ : Any ):
raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def __lowerCAmelCase ( self : Optional[int] ,*lowercase_ : Union[str, Any] ,**lowercase_ : List[Any] ):
raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def __lowerCAmelCase ( self : str ,*lowercase_ : Union[str, Any] ,**lowercase_ : Any ):
raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def __lowerCAmelCase ( self : int ,*lowercase_ : List[str] ,**lowercase_ : int ):
raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__( self : Any ,lowercase_ : Any ):
if isinstance(lowercase_ ,lowercase_ ):
lowerCAmelCase__ : Optional[Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Dict ,lowercase_ : Dict ,lowercase_ : int ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowercase_ ,lowercase_ )
super().__setattr__(lowercase_ ,lowercase_ )
def __setitem__( self : str ,lowercase_ : Optional[int] ,lowercase_ : Optional[Any] ):
# Will raise a KeyException if needed
super().__setitem__(lowercase_ ,lowercase_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ):
return tuple(self[k] for k in self.keys() )
class SCREAMING_SNAKE_CASE ( a_ , a_ ):
"""simple docstring"""
@classmethod
def __lowerCAmelCase ( cls : Dict ,lowercase_ : Optional[Any] ):
raise ValueError(
F'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = "longest"
lowercase__ = "max_length"
lowercase__ = "do_not_pad"
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = "pt"
lowercase__ = "tf"
lowercase__ = "np"
lowercase__ = "jax"
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[Any] ,lowercase_ : List[ContextManager] ):
lowerCAmelCase__ : Optional[int] = context_managers
lowerCAmelCase__ : Tuple = ExitStack()
def __enter__( self : str ):
for context_manager in self.context_managers:
self.stack.enter_context(lowercase_ )
def __exit__( self : Tuple ,*lowercase_ : Tuple ,**lowercase_ : List[Any] ):
self.stack.__exit__(*lowercase_ ,**lowercase_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Union[str, Any] = infer_framework(A_ )
if framework == "tf":
lowerCAmelCase__ : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase__ : Any = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase__ : Dict = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : List[str] = model_class.__name__
lowerCAmelCase__ : List[Any] = infer_framework(A_ )
if framework == "tf":
lowerCAmelCase__ : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase__ : List[str] = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase__ : Optional[int] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __SCREAMING_SNAKE_CASE ( A_ , A_ = "" , A_ = "." ):
def _flatten_dict(A_ , A_="" , A_="." ):
for k, v in d.items():
lowerCAmelCase__ : Any = str(A_ ) + delimiter + str(A_ ) if parent_key else k
if v and isinstance(A_ , A_ ):
yield from flatten_dict(A_ , A_ , delimiter=A_ ).items()
else:
yield key, v
return dict(_flatten_dict(A_ , A_ , A_ ) )
@contextmanager
def __SCREAMING_SNAKE_CASE ( A_ , A_ = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __SCREAMING_SNAKE_CASE ( A_ , A_=None ):
if is_numpy_array(A_ ):
return np.transpose(A_ , axes=A_ )
elif is_torch_tensor(A_ ):
return array.T if axes is None else array.permute(*A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.transpose(A_ , perm=A_ )
elif is_jax_tensor(A_ ):
return jnp.transpose(A_ , axes=A_ )
else:
raise ValueError(f'Type not supported for transpose: {type(A_ )}.' )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
if is_numpy_array(A_ ):
return np.reshape(A_ , A_ )
elif is_torch_tensor(A_ ):
return array.reshape(*A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.reshape(A_ , A_ )
elif is_jax_tensor(A_ ):
return jnp.reshape(A_ , A_ )
else:
raise ValueError(f'Type not supported for reshape: {type(A_ )}.' )
def __SCREAMING_SNAKE_CASE ( A_ , A_=None ):
if is_numpy_array(A_ ):
return np.squeeze(A_ , axis=A_ )
elif is_torch_tensor(A_ ):
return array.squeeze() if axis is None else array.squeeze(dim=A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.squeeze(A_ , axis=A_ )
elif is_jax_tensor(A_ ):
return jnp.squeeze(A_ , axis=A_ )
else:
raise ValueError(f'Type not supported for squeeze: {type(A_ )}.' )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
if is_numpy_array(A_ ):
return np.expand_dims(A_ , A_ )
elif is_torch_tensor(A_ ):
return array.unsqueeze(dim=A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.expand_dims(A_ , axis=A_ )
elif is_jax_tensor(A_ ):
return jnp.expand_dims(A_ , axis=A_ )
else:
raise ValueError(f'Type not supported for expand_dims: {type(A_ )}.' )
def __SCREAMING_SNAKE_CASE ( A_ ):
if is_numpy_array(A_ ):
return np.size(A_ )
elif is_torch_tensor(A_ ):
return array.numel()
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.size(A_ )
elif is_jax_tensor(A_ ):
return array.size
else:
raise ValueError(f'Type not supported for expand_dims: {type(A_ )}.' )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
for key, value in auto_map.items():
if isinstance(A_ , (tuple, list) ):
lowerCAmelCase__ : Tuple = [f'{repo_id}--{v}' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCAmelCase__ : Tuple = f'{repo_id}--{value}'
return auto_map
def __SCREAMING_SNAKE_CASE ( A_ ):
for base_class in inspect.getmro(A_ ):
lowerCAmelCase__ : List[str] = base_class.__module__
lowerCAmelCase__ : List[Any] = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.' )
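# A minimal behavioral sketch of `ModelOutput` (not part of the original file):
# fields are reachable by attribute, by string key, and by index, and fields left
# as None are dropped from the dict/tuple views.
if __name__ == "__main__":
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class ToyOutput(ModelOutput):
        logits: Optional[list] = None
        loss: Optional[float] = None

    out = ToyOutput(logits=[0.1, 0.9])
    assert out.logits == out["logits"] == out[0]
    assert out.to_tuple() == ([0.1, 0.9],)  # `loss` is None, so it is omitted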
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : float , **lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = feature_size
lowercase_ = sampling_rate
lowercase_ = padding_value
lowercase_ = kwargs.pop("""padding_side""" , """right""")
lowercase_ = kwargs.pop("""return_attention_mask""" , lowerCAmelCase_)
super().__init__(**lowerCAmelCase_)
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = True , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , (list, tuple)) and isinstance(processed_features[0] , (dict, BatchFeature)):
lowercase_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
F''' to this method that includes {self.model_input_names[0]}, but you provided'''
F''' {list(processed_features.keys())}''')
lowercase_ = processed_features[self.model_input_names[0]]
lowercase_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCAmelCase_) == 0:
if return_attention_mask:
lowercase_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowercase_ = required_input[0]
if isinstance(lowerCAmelCase_ , (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowercase_ = 0
while len(required_input[index]) == 0:
index += 1
if index < len(lowerCAmelCase_):
lowercase_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCAmelCase_):
lowercase_ = """tf"""
elif is_torch_tensor(lowerCAmelCase_):
lowercase_ = """pt"""
elif isinstance(lowerCAmelCase_ , (int, float, list, tuple, np.ndarray)):
lowercase_ = """np"""
else:
raise ValueError(
F'''type of {first_element} unknown: {type(lowerCAmelCase_)}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""")
for key, value in processed_features.items():
if isinstance(value[0] , (int, float)):
lowercase_ = to_numpy(lowerCAmelCase_)
else:
lowercase_ = [to_numpy(lowerCAmelCase_) for v in value]
# Convert padding_strategy in PaddingStrategy
lowercase_ = self._get_padding_strategies(padding=lowerCAmelCase_ , max_length=lowerCAmelCase_)
lowercase_ = processed_features[self.model_input_names[0]]
lowercase_ = len(lowerCAmelCase_)
if not all(len(lowerCAmelCase_) == batch_size for v in processed_features.values()):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""")
lowercase_ = []
for i in range(lowerCAmelCase_):
lowercase_ = {k: v[i] for k, v in processed_features.items()}
# truncation
lowercase_ = self._truncate(
lowerCAmelCase_ , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , truncation=lowerCAmelCase_ , )
truncated_inputs.append(lowerCAmelCase_)
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowercase_ = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
lowercase_ = PaddingStrategy.MAX_LENGTH
lowercase_ = {}
for i in range(lowerCAmelCase_):
# padding
lowercase_ = self._pad(
truncated_inputs[i] , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
lowercase_ = []
if value.dtype is np.dtype(np.floataa):
lowercase_ = value.astype(np.floataa)
batch_outputs[key].append(lowerCAmelCase_)
return BatchFeature(lowerCAmelCase_ , tensor_type=lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Union[Dict[str, np.ndarray], BatchFeature] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
lowercase_ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowercase_ = len(lowerCAmelCase_)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCAmelCase_) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowercase_ = np.ones(len(lowerCAmelCase_) , dtype=np.intaa)
if needs_to_be_padded:
lowercase_ = max_length - len(lowerCAmelCase_)
if self.padding_side == "right":
if return_attention_mask:
lowercase_ = np.pad(
processed_features["""attention_mask"""] , (0, difference))
lowercase_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowercase_ = np.pad(
lowerCAmelCase_ , lowerCAmelCase_ , """constant""" , constant_values=self.padding_value)
elif self.padding_side == "left":
if return_attention_mask:
lowercase_ = np.pad(
processed_features["""attention_mask"""] , (difference, 0))
lowercase_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowercase_ = np.pad(
lowerCAmelCase_ , lowerCAmelCase_ , """constant""" , constant_values=self.padding_value)
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side))
return processed_features
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Union[Dict[str, np.ndarray], BatchFeature] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""")
lowercase_ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase_ = len(lowerCAmelCase_) > max_length
if needs_to_be_truncated:
lowercase_ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowercase_ = processed_features["""attention_mask"""][:max_length]
return processed_features
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=None):
"""simple docstring"""
if padding is not False:
if padding is True:
lowercase_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = PaddingStrategy(lowerCAmelCase_)
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = padding
else:
lowercase_ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''')
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""")
return padding_strategy
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = "▁"
UpperCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model"}
UpperCAmelCase : List[Any] = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
UpperCAmelCase : Optional[Any] = {
"google/reformer-crime-and-punishment": 52_4288,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict="</s>" , lowerCAmelCase_ : Dict="<unk>" , lowerCAmelCase_ : Dict=[] , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
lowercase_ = vocab_file
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCAmelCase_)
@property
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
return self.sp_model.get_piece_size()
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = {self.convert_ids_to_tokens(lowerCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.__dict__.copy()
lowercase_ = None
return state
def __setstate__( self : Optional[Any] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
lowercase_ = {}
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_)
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : List[str]):
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCAmelCase_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Any):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
lowercase_ = self.sp_model.IdToPiece(lowerCAmelCase_)
return token
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Any):
"""simple docstring"""
lowercase_ = []
lowercase_ = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase_) + token
lowercase_ = []
else:
current_sub_tokens.append(lowerCAmelCase_)
out_string += self.sp_model.decode(lowerCAmelCase_)
return out_string.strip()
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase_ , """wb""") as fi:
lowercase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_)
return (out_vocab_file,)
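# A minimal usage sketch (not part of the original file; downloading the checkpoint
# requires network access):
#
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tok("Crime and Punishment").input_ids
#   tok.convert_ids_to_tokens(ids)  # SentencePiece pieces, word starts marked with "▁"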
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = (boundary[1] - boundary[0]) / steps
_lowerCAmelCase : List[str] = boundary[0]
_lowerCAmelCase : Tuple = boundary[1]
_lowerCAmelCase : Optional[int] = make_points(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = 0.0
y += (h / 2.0) * f(_lowerCamelCase )
for i in x_i:
# print(i)
y += h * f(_lowerCamelCase )
y += (h / 2.0) * f(_lowerCamelCase )
return y
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = a + h
while x < (b - h):
yield x
_lowerCAmelCase : List[Any] = x + h
def lowerCamelCase__ ( _lowerCamelCase ): # enter your function here
'''simple docstring'''
_lowerCAmelCase : Any = (x - 0) * (x - 0)
return y
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 0.0 # Lower bound of integration
_lowerCAmelCase : Tuple = 1.0 # Upper bound of integration
_lowerCAmelCase : List[str] = 10.0 # define number of steps or resolution
_lowerCAmelCase : Optional[int] = [a, b] # define boundary of integration
_lowerCAmelCase : Dict = method_a(_lowerCamelCase , _lowerCamelCase )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
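# For reference (not in the original file): with f(x) = x**2 on [0, 1], the exact
# integral is 1/3, so the 10-step trapezoidal estimate printed above comes out
# at roughly 0.335.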
"""
Jacobi iteration method for solving a strictly diagonally dominant system of
linear equations: https://en.wikipedia.org/wiki/Jacobi_method
"""
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError if the coefficient part of `table` (last column excluded)
    is not strictly diagonally dominant, i.e. |a_ii| > sum of |a_ij| for j != i."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += abs(table[i][j])

        if abs(table[i][i]) <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
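# A small worked example (not in the original file): a strictly diagonally dominant
# 2x2 system 4x + y = 6, x + 3y = 7, whose exact solution is x = 1, y = 2.
#
#   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#   constant = np.array([[6.0], [7.0]])
#   jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 50)
#   # -> values converging to [1.0, 2.0]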
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head: a single linear layer mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
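# A minimal usage sketch (not part of the original file) showing the expected shapes.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden_state = torch.randn(2, 768)  # (batch, embed_size), e.g. a pooled encoder state
    logits = head(hidden_state)         # (batch, class_size)
    print(logits.shape)                 # torch.Size([2, 5])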
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTester(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
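# For context, a short sketch of how the verbosity helpers exercised above are
# used in application code; it relies only on the public logging API already
# imported in this file.
def verbosity_demo():
    logging.set_verbosity_info()           # surface `info`-level messages
    logger = logging.get_logger(__name__)  # namespaced logger under `transformers.*`
    logger.info("visible at info level")
    logging.set_verbosity_warning()        # back to the default threshold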
| 71
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
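# Usage sketch for the processor above; "BAAI/AltCLIP" is an assumed checkpoint
# hosting both the CLIP image processor and the XLM-R tokenizer.
if __name__ == "__main__":
    from PIL import Image

    processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
    image = Image.open("cat.png")  # any local RGB image
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']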
| 535
| 0
|
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1 (Project Euler problem 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28_433 * pow(2, 7_830_457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }")
| 286
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Contract for CLI subcommands: register the argparse subcommand, then run it."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
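# A minimal sketch of a concrete subclass; the command name and arguments are
# illustrative assumptions, not part of the original interface file.
class EchoCommand(BaseTransformersCLICommand):
    """Hypothetical command that prints its argument back."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--text", type=str, default="hello")

    def __init__(self, text: str):
        self._text = text

    def run(self):
        print(self._text)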
| 286
| 1
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires the BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work when the requested generation exceeds the model capacity
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
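# To make the length handling above concrete: a small sketch with the same tiny
# checkpoint, runnable on its own.
if __name__ == "__main__":
    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    # `max_new_tokens` bounds only freshly generated tokens, so it cannot
    # conflict with the prompt length the way `max_length` can.
    print(generator("Hello I believe in", max_new_tokens=5, do_sample=False)[0]["generated_text"])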
| 517
|
"""
Multi-Level Feedback Queue (MLFQ) scheduling: every queue except the last runs
round robin with its own time slice; the last queue runs first-come-first-served.
"""
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Return the names of finished processes in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Return the waiting time of each process in `queue`."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Return the turnaround time of each process in `queue`."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Return the completion time of each process in `queue`."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one run round robin with their own time slice
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
| 517
| 1
|
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from `subreddit`, optionally restricted to the `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 709
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
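# Sketch: building a model from this config; Swin2SRForImageSuperResolution is
# the companion model class for this configuration.
if __name__ == "__main__":
    from transformers import Swin2SRForImageSuperResolution

    config = Swin2SRConfig(upscale=4)  # 4x super-resolution variant
    model = Swin2SRForImageSuperResolution(config)
    print(model.config.model_type)  # "swin2sr"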
| 218
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
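# Round-trip sketch with the checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP;
# Hub access is assumed.
if __name__ == "__main__":
    tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    ids = tok("Hello world").input_ids
    print(ids)
    print(tok.decode(ids))  # "Hello world"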
| 23
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 441
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
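# Sketch of the config in use; DetaForObjectDetection is the companion model
# class (instantiating it may additionally require torchvision).
if __name__ == "__main__":
    from transformers import DetaForObjectDetection

    config = DetaConfig()  # defaults to a ResNet backbone, d_model=256
    model = DetaForObjectDetection(config)
    print(config.hidden_size, config.num_attention_heads)  # 256 8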
| 717
|
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` occurs in the sorted list `a_list`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 244
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 201
|
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
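    # Quick illustration of the operations the class supports.
    m = Matrix([[1, 2], [3, 4]])
    print(m.determinant())               # -2
    print(m + Matrix([[1, 0], [0, 1]]))  # elementwise sum
    print(m * Matrix([[0, 1], [1, 0]]))  # matrix product with columns swapped
    print(m**2)                          # repeated multiplication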
| 201
| 1
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count integer right triangles by perimeter, for legs up to `max_perimeter`."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Project Euler 39: the perimeter <= `max_perimeter` with the most right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions')
| 171
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
snake_case__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def parse_bool(string: str) -> bool:
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 171
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self: Any, *a_: List[Any], **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: List[Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[Any], *a_: Dict, **a_: str ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Tuple, *a_: Any, **a_: Dict ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: Dict, **a_: Dict ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: int, **a_: List[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Union[str, Any], *a_: int, **a_: List[str] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[Any], *a_: int, **a_: List[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: str, **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: str, *a_: Dict, **a_: int ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[str], *a_: List[str], **a_: str ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: str, *a_: Any, **a_: List[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: List[str], *a_: int, **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: int, *a_: int, **a_: int ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: str, *a_: Dict, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Union[str, Any], *a_: str, **a_: Optional[int] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: Optional[int], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[Any], *a_: Optional[Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Dict, *a_: List[str], **a_: int ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[str], *a_: List[Any], **a_: Optional[int] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Any, *a_: Dict, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: str, *a_: Any, **a_: str ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Dict, *a_: Optional[int], **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: int, *a_: Optional[int], **a_: Optional[int] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Union[str, Any], *a_: List[str], **a_: str ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: int, *a_: Any, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Dict, *a_: Optional[Any], **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Optional[Any], *a_: Optional[int], **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Any, *a_: List[str], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Union[str, Any], *a_: Optional[int], **a_: str ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Any, *a_: List[str], **a_: Any ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Dict, *a_: List[str], **a_: Tuple ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: List[Any], *a_: int, **a_: Dict ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Optional[int], *a_: Dict, **a_: Dict ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[Any], *a_: int, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Union[str, Any], **a_: List[Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["flax"]
def __init__( self: Tuple, *a_: Union[str, Any], **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(self, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Tuple, **a_: Dict ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: List[str], **a_: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls, ["""flax"""] )
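# Context for the placeholder classes above: dummy objects like these mirror a
# library's public Flax API when `flax` is not installed, so importing the
# package still works and only *using* a Flax-backed class raises a clear
# "requires flax" error via `requires_backends`. The two classmethods on each
# dummy typically stand in for `from_config` and `from_pretrained` (the
# original class and method names were lost in this dump).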
| 609
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    def __init__( self, config=None, data_args=None, *args, **kwargs ):
        super().__init__(*args, **kwargs )
        if config is None:
            assert isinstance(self.model, PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.." )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self, num_training_steps ):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
    def _get_lr_scheduler( self, num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps )
        return scheduler

    def _get_train_sampler( self ):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )
    def _compute_loss( self, model, inputs, labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1 )
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id )
        return loss, logits

    def compute_loss( self, model, inputs ):
        labels = inputs.pop("labels" )
        loss, _ = self._compute_loss(model, inputs, labels )
        return loss
    def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"] )
        labels = inputs.pop("labels" )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"] )
        return (loss, logits, labels)

    def _pad_tensors_to_max_len( self, tensor, max_length ):
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
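# Illustration of `_pad_tensors_to_max_len` (hypothetical values): with
# pad_token_id=1 and max_length=5, the tensor [[42, 7]] becomes
# [[42, 7, 1, 1, 1]]: a pad-filled tensor is allocated first and the original
# values are then copied into its leading positions.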
| 609
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16 ):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
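# Design note on the collate function above: padding to a multiple of 8
# (fp16/bf16) or 16 (fp8) aligns sequence lengths with what mixed-precision
# kernels prefer, while the fixed max_length on TPU avoids recompiling the XLA
# graph for every new batch shape.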
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args ):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , eval_metric )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. '
        'and an Nvidia Ampere GPU.' , )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
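# Example launch (assumes `accelerate config` was run beforehand; the flag
# values here are illustrative):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16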
| 46
|
class Node:
    def __init__( self , name , val ):
        self.name = name
        self.val = val

    def __str__( self ):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__( self , other ):
        return self.val < other.val


class MinHeap:
    def __init__( self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )

    def __getitem__( self , key ):
        return self.get_value(key )

    def get_parent_idx( self , idx ):
        return (idx - 1) // 2

    def get_left_child_idx( self , idx ):
        return idx * 2 + 1

    def get_right_child_idx( self , idx ):
        return idx * 2 + 2

    def get_value( self , key ):
        return self.heap_dict[key]

    def build_heap( self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array

    def sift_down( self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up( self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )

    def peek( self ):
        return self.heap[0]

    def remove( self ):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x

    def insert( self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )

    def is_empty( self ):
        return len(self.heap ) == 0

    def decrease_key( self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
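# Invariants worth keeping in mind (a reading aid, not part of the original
# code): `heap` holds the nodes in array order, `idx_of_element` maps each node
# to its current index, and `heap_dict` maps each node's name to its value.
# `decrease_key` only needs `sift_up` because a smaller value can only move a
# node toward the root.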
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46
| 1
|
import requests
__magic_name__ : Any = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def a_ ( __lowerCAmelCase ):
# fetching a list of articles in json format
lowerCAmelCase__ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 615
|
from scipy.stats import pearsonr
import datasets
__magic_name__ : Union[str, Any] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
__magic_name__ : Tuple = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
__magic_name__ : int = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ (datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
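# Quick manual check of the docstring's Example 1 (scipy called directly;
# values rounded as in the example):
#   from scipy.stats import pearsonr
#   r, p = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
#   round(r, 2), round(p, 2)  # (-0.74, 0.15)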
| 615
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _UpperCamelCase ( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ = MBartaaTokenizer(_A , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = '''<s>'''
SCREAMING_SNAKE_CASE_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_A ) , 1054 )
def _UpperCamelCase ( self ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = MBartaaTokenizer(_A , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_A )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
# fmt: off
SCREAMING_SNAKE_CASE_ = {'''input_ids''': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def _UpperCamelCase ( self ) -> str:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE_ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A , legacy_format=_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A , legacy_format=_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ ="facebook/mbart-large-50-one-to-many-mmt"
UpperCAmelCase_ =[
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
UpperCAmelCase_ =[
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
UpperCAmelCase_ =[EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        cls.pad_token_id = 1
        return cls
def _UpperCamelCase ( self ) -> Dict:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250038 )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _A )
def _UpperCamelCase ( self ) -> Optional[int]:
self.assertIn(_A , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(_A , skip_special_tokens=_A )
SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
self.assertNotIn(self.tokenizer.eos_token , _A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , _A )
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , max_length=_A , truncation=_A ).input_ids[0]
self.assertEqual(ids[0] , _A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_A ) , _A )
def _UpperCamelCase ( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250053, 250001] )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = MBartaaTokenizer.from_pretrained(_A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _A )
@require_torch
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_A , truncation=_A , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , padding=_A , truncation=_A , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text_target=self.tgt_text , padding=_A , truncation=_A , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = targets['''input_ids''']
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(_A ) , {
# en_XX, A, test, EOS
'''input_ids''': [[250004, 62, 3034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
| 597
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 597
| 1
|
import numpy as np
def sigmoid( vector ):
    """Apply the sigmoid (logistic) function element-wise.
    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
|
def one_pence() -> int:
    return 1
def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)
def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)
def solution(x: int = 200) -> int:
    return two_pound(x)
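# Why the recursion counts combinations (sketch): each function counts the ways
# to make `x` from its own denomination downwards, splitting into "use one more
# of this coin" (the recursive first term) and "switch to smaller coins only"
# (the second term). For example, five_pence(5) = 4, covering {5}, {2+2+1},
# {2+1+1+1} and {1+1+1+1+1}.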
if __name__ == "__main__":
print(solution(int(input().strip())))
| 166
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase : Tuple = XGLMTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = "<pad>"
UpperCamelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
UpperCamelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(lowerCamelCase ) , 10_08 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_08 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = XGLMTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
UpperCamelCase : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
UpperCamelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase , f.name )
UpperCamelCase : str = XGLMTokenizer(f.name , keep_accents=lowerCamelCase )
UpperCamelCase : int = pickle.dumps(lowerCamelCase )
pickle.loads(lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCamelCase : Any = self.get_tokenizer()
UpperCamelCase : Dict = self.get_rust_tokenizer()
UpperCamelCase : Union[str, Any] = "I was born in 92000, and this is falsé."
UpperCamelCase : Tuple = tokenizer.tokenize(lowerCamelCase )
UpperCamelCase : List[Any] = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
UpperCamelCase : List[str] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
UpperCamelCase : Optional[int] = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
UpperCamelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase : Any = tokenizer.encode(lowerCamelCase )
UpperCamelCase : Dict = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : Optional[Any] = "Hello World!"
UpperCamelCase : Optional[Any] = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : Optional[Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
UpperCamelCase : Optional[Any] = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
UpperCamelCase : Dict = {
"input_ids": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="facebook/xglm-564M" , padding=lowerCamelCase , )
| 435
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args: Namespace ):
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name)
IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=lowerCamelCase , required=lowerCamelCase , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=lowerCamelCase , required=lowerCamelCase , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=lowerCamelCase , required=lowerCamelCase , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=lowerCamelCase , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=lowerCamelCase , default=lowerCamelCase , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type: str , tf_checkpoint: str , pytorch_dump_output: str , config: str , finetuning_task_name: str , *args , ):
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase : str = self._tf_checkpoint
UpperCamelCase : Dict = ""
else:
UpperCamelCase : Optional[int] = self._tf_checkpoint
UpperCamelCase : List[Any] = ""
convert_transfo_xl_checkpoint_to_pytorch(
lowerCamelCase , self._config , self._pytorch_dump_output , lowerCamelCase )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]")
| 435
| 1
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )
    # Get the metric function
    metric = evaluate.load("xnli")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
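# Added usage sketch (hedged): a typical invocation of this example script; the output
# directory is a hypothetical placeholder.
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased --language de \
#     --train_language en --do_train --do_eval --output_dir /tmp/debug_xnli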
| 183
|
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (Lucas-Lehmer test)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
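    # Added sanity check: 2**7 - 1 = 127 is prime, while 2**11 - 1 = 2047 = 23 * 89 is not.
    assert lucas_lehmer_test(7) and not lucas_lehmer_test(11)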
| 183
| 1
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):  # method name reconstructed; it was obfuscated in the dump
        pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 711
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}")
        return list(range(n_student))
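# Added usage sketch: with the LAYERS_TO_COPY table above, a 3-layer student of a
# 12-layer teacher copies the teacher's first, middle and last layers:
# >>> pick_layers_to_copy(n_student=3, n_teacher=12)
# [0, 6, 11]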
def get_layers_to_supervise(n_student, n_teacher) -> list:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _lowerCamelCase ( _a , _a = "student" , _a = None , _a = None , _a=False , _a=None , _a=None , **_a , ):
"""simple docstring"""
_lowerCamelCase = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(_a , _a ):
AutoTokenizer.from_pretrained(_a ).save_pretrained(_a ) # purely for convenience
_lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_a ).eval()
else:
assert isinstance(_a , _a ), F'''teacher must be a model or string got type {type(_a )}'''
_lowerCamelCase = teacher.config.to_diff_dict()
try:
_lowerCamelCase , _lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_lowerCamelCase = teacher_e
if d is None:
_lowerCamelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
_lowerCamelCase , _lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_lowerCamelCase , _lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_lowerCamelCase = teacher_e
if d is None:
_lowerCamelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_a )
# Copy weights
_lowerCamelCase = teacher.config_class(**_a )
_lowerCamelCase = AutoModelForSeqaSeqLM.from_config(_a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=_a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_lowerCamelCase , _lowerCamelCase = list(range(_a ) ), list(range(_a ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(_a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_lowerCamelCase = pick_layers_to_copy(_a , _a )
if d_layers_to_copy is None:
_lowerCamelCase = pick_layers_to_copy(_a , _a )
try:
if hasattr(
_a , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _a )
copy_layers(teacher.decoder.block , student.decoder.block , _a )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_lowerCamelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(_a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 297
| 0
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 207
|
from ..utils import DummyObject, requires_backends
# NOTE: the original class name was stripped by the dump and is not recoverable;
# "DummyTorchAndScipyObject" is a placeholder following the usual dummy-object pattern.
class DummyTorchAndScipyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 207
| 1
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
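# Added illustration: applying the renames above maps an original Bark checkpoint key such as
# "transformer.h.0.attn.c_attn.weight" to the HF-style "layers.0.attn.att_proj.weight".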
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_="text" ):
'''simple docstring'''
if model_type == "text":
__SCREAMING_SNAKE_CASE = BarkSemanticModel
__SCREAMING_SNAKE_CASE = BarkSemanticConfig
__SCREAMING_SNAKE_CASE = BarkSemanticGenerationConfig
elif model_type == "coarse":
__SCREAMING_SNAKE_CASE = BarkCoarseModel
__SCREAMING_SNAKE_CASE = BarkCoarseConfig
__SCREAMING_SNAKE_CASE = BarkCoarseGenerationConfig
elif model_type == "fine":
__SCREAMING_SNAKE_CASE = BarkFineModel
__SCREAMING_SNAKE_CASE = BarkFineConfig
__SCREAMING_SNAKE_CASE = BarkFineGenerationConfig
else:
raise NotImplementedError()
__SCREAMING_SNAKE_CASE = f"""{model_type}_small""" if use_small else model_type
__SCREAMING_SNAKE_CASE = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(UpperCAmelCase__ ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
__SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location=UpperCAmelCase__ )
# this is a hack
__SCREAMING_SNAKE_CASE = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
__SCREAMING_SNAKE_CASE = model_args["""vocab_size"""]
__SCREAMING_SNAKE_CASE = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__SCREAMING_SNAKE_CASE = model_args.pop("n_head" )
__SCREAMING_SNAKE_CASE = model_args.pop("n_embd" )
__SCREAMING_SNAKE_CASE = model_args.pop("n_layer" )
__SCREAMING_SNAKE_CASE = ConfigClass(**checkpoint["model_args"] )
__SCREAMING_SNAKE_CASE = ModelClass(config=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = GenerationConfigClass()
__SCREAMING_SNAKE_CASE = model_generation_config
__SCREAMING_SNAKE_CASE = checkpoint["""model"""]
# fixup checkpoint
__SCREAMING_SNAKE_CASE = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(UpperCAmelCase__ ):
# replace part of the key with corresponding layer name in HF implementation
__SCREAMING_SNAKE_CASE = k[len(UpperCAmelCase__ ) :]
for old_layer_name in new_layer_name_dict:
__SCREAMING_SNAKE_CASE = new_k.replace(UpperCAmelCase__ , new_layer_name_dict[old_layer_name] )
__SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = set(state_dict.keys() ) - set(model.state_dict().keys() )
__SCREAMING_SNAKE_CASE = {k for k in extra_keys if not k.endswith(".attn.bias" )}
__SCREAMING_SNAKE_CASE = set(model.state_dict().keys() ) - set(state_dict.keys() )
__SCREAMING_SNAKE_CASE = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(UpperCAmelCase__ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(UpperCAmelCase__ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.num_parameters(exclude_embeddings=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = checkpoint["""best_val_loss"""].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(UpperCAmelCase__ , 3 )} loss""" )
model.eval()
model.to(UpperCAmelCase__ )
del checkpoint, state_dict
return model
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__SCREAMING_SNAKE_CASE = """cpu""" # do conversion on cpu
__SCREAMING_SNAKE_CASE = _get_ckpt_path(UpperCAmelCase__ , use_small=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = _load_model(UpperCAmelCase__ , UpperCAmelCase__ , model_type=UpperCAmelCase__ , use_small=UpperCAmelCase__ )
# load bark initial model
__SCREAMING_SNAKE_CASE = _bark_load_model(UpperCAmelCase__ , "cpu" , model_type=UpperCAmelCase__ , use_small=UpperCAmelCase__ )
if model_type == "text":
__SCREAMING_SNAKE_CASE = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=UpperCAmelCase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
__SCREAMING_SNAKE_CASE = 5
__SCREAMING_SNAKE_CASE = 10
if model_type in ["text", "coarse"]:
__SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__SCREAMING_SNAKE_CASE = bark_model(UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
# take last logits
__SCREAMING_SNAKE_CASE = output_new_model_total.logits[:, [-1], :]
else:
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = bark_model(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path, ):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
a__ : int = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
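# Added usage sketch (hypothetical script name and output path):
#   python convert_suno_to_hf.py text /tmp/bark_text --is_small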
| 708
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; it was obfuscated in the dump
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1e-1
| 553
| 0
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
_a : Optional[int] = single_qubit_measure(2, 2)
print(F"""Total count for various states are: {counts}""")
| 213
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000 ) -> int:
return sum(2 * a * ((a - 1) // 2) for a in range(3 ,n + 1 ) )
if __name__ == "__main__":
print(solution())
| 213
| 1
|
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert values in descending order so iteration yields them ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
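# Behaviour sketch, derived from the insertion logic above: nodes are inserted
# in descending order, so iteration yields ascending values. For example,
# merge_lists(SortedLinkedList((3, 1)), SortedLinkedList((2, 0))) prints
# "0 -> 1 -> 2 -> 3".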
| 178
|
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging

logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
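# A quick sketch of the activation parsing implemented in T5Config above
# (assumes the PretrainedConfig machinery accepts these kwargs):
#
#     cfg = T5Config(feed_forward_proj="gated-gelu")
#     cfg.is_gated_act   # True
#     cfg.dense_act_fn   # "gelu_new" (remapped for backwards compatibility)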
| 178
| 1
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # in-place normal init so the two controlnets start from non-trivial weights
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
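    # The four calls above cover the supported guidance-window forms: no window,
    # a scalar start/end pair, one [start, end] pair per controlnet, and a
    # scalar start broadcast against a per-controlnet list of ends.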
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 365
|
def is_palindrome(num: int) -> bool:
    """Return True if ``num`` reads the same when its digits are reversed.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
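# An equivalent one-liner for contrast with the digit arithmetic above,
# sketched here as a hypothetical helper (string reversal instead of math):
#
#     def is_palindrome_str(num: int) -> bool:
#         return num >= 0 and str(num) == str(num)[::-1]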
| 454
| 0
|
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
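# The metric class above is a thin wrapper; the same number can be obtained
# from scikit-learn directly (mirrors Example 1 in the docstring):
#
#     from sklearn.metrics import matthews_corrcoef
#     matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3])  # ~0.54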
| 584
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
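# Why the lambda above: dataclasses reject mutable defaults such as lists, so
# `list_field` routes the value through `default_factory`. In other words,
# `list_field(default=[1, 2])` behaves like
# `field(default_factory=lambda: [1, 2], metadata=None)`.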
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }
        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)
        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # YAML is a superset of JSON, so the YAML loader reads the JSON file fine
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]
        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)
        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 584
| 1
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 591
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into our VisionEncoderDecoder structure."""
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
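# Example invocation (the script filename is hypothetical; the URL is the
# argparse default above):
#
#     python convert_trocr_checkpoint.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten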
| 591
| 1
|
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 714
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
| 575
| 0
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
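# Usage sketch, mirroring the parametrized test that accompanies this helper:
#
#     hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
#     # -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"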
| 234
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
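# A minimal usage sketch (assumes a local image file; loading a pipeline with
# no explicit model pulls a default vision-to-text checkpoint):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text")
#     captioner("path/to/image.png")  # -> [{"generated_text": "..."}]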
| 484
| 0
|
import argparse
import datetime
def zeller(date_input: str) -> str:
    # Days of the week for the response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
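# Worked example for the arithmetic above: "01-01-2000" gives m=13, y=1999
# (January counts as month 13 of the previous year), c=19, k=99, t=28, u=4,
# v=24, x=100, z=156, w=118 and f = 118 % 7 = 6 -> "Saturday", which matches
# the calendar.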
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
| 713
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang="en_XX" , tgt_texts=None , tgt_lang="ro_RO" , **kwargs , ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ):
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
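# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal model of
# the fairseq-offset id mapping implemented by the tokenizer above. All names
# below are hypothetical stand-ins; only the offset arithmetic mirrors the
# `_convert_token_to_id` / `_convert_id_to_token` methods.
def _demo_fairseq_offset_mapping():
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1
    sp_pieces = {",": 3, ".": 4, "s": 6}  # pretend SentencePiece piece -> id table
    def token_to_id(token):
        if token in fairseq_tokens_to_ids:
            return fairseq_tokens_to_ids[token]
        spm_id = sp_pieces.get(token, 0)
        # SentencePiece id 0 means "unknown"; real pieces are shifted by the offset
        return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]
    assert token_to_id("<pad>") == 1  # reserved fairseq slot, no offset
    assert token_to_id(",") == 4      # 3 (spm id) + 1 (fairseq_offset), as in the table above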
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings" )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_initialization( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
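# ---------------------------------------------------------------------------
# Illustrative note (not part of the original tests): the shape assertions
# above rely on RegNet's overall stride of 32 (a stride-2 stem followed by
# four stride-2 stages), so the final feature map is (H // 32, W // 32).
# A minimal, self-contained check of that arithmetic:
def _demo_regnet_output_resolution(image_size: int = 224) -> tuple:
    overall_stride = 32  # 2 (stem) * 2 * 2 * 2 * 2 (four stages)
    return (image_size // overall_stride, image_size // overall_stride)

assert _demo_regnet_output_resolution(224) == (7, 7)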
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
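# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the acceptance rule
# used above is the Metropolis criterion -- a worsening move of size `change`
# (change <= 0) is still accepted with probability e^(change / temperature),
# so bad moves become rarer as the temperature drops.
def _demo_acceptance_probability(change: float, temperature: float) -> float:
    if change > 0:  # improving moves are always accepted
        return 1.0
    return math.e ** (change / temperature)

# hot system accepts bad moves more readily than a cold one
assert _demo_acceptance_probability(-1.0, 100.0) > _demo_acceptance_probability(-1.0, 1.0)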
if __name__ == "__main__":
    def test_f1(x, y):
        """simple docstring"""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        """simple docstring"""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph, vert, visited):
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph, vert, visited):
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph):
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
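# ---------------------------------------------------------------------------
# Illustrative usage (assumes the names restored above): Kosaraju's two-pass
# scheme -- a topological ordering of the graph, then DFS over the reversed
# graph -- recovers the strongly connected components of the fixtures.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # e.g. [[0, 1, 2], [3, 4, 5]]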
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
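# ---------------------------------------------------------------------------
# Illustrative sketch (not the real `_LazyModule` implementation): the pattern
# above defers the heavy submodule imports until an attribute is first
# accessed. A minimal, stand-alone model of the same idea:
import importlib

def _make_lazy_getattr(package, import_structure):
    attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(name):  # would be installed as the module-level __getattr__
        if name in attr_to_module:
            submodule = importlib.import_module("." + attr_to_module[name], package)
            return getattr(submodule, name)
        raise AttributeError(f"module {package!r} has no attribute {name!r}")
    return __getattr__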
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig(BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = "focalnet"
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1E-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
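# ---------------------------------------------------------------------------
# Illustrative check (not part of the original module): with the default
# depths=[2, 2, 6, 2], the derived backbone stage names are
# ["stem", "stage1", ..., "stage4"], and any `out_features` passed to
# get_aligned_output_features_output_indices must come from that list.
def _demo_stage_names(depths=(2, 2, 6, 2)):
    return ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]

assert _demo_stage_names() == ["stem", "stage1", "stage2", "stage3", "stage4"]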
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure ):
        '''simple docstring'''
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def show_cursor():
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def hide():
    '''simple docstring'''
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
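# ---------------------------------------------------------------------------
# Illustrative usage (assumes the names restored above): the context manager
# guarantees the cursor is shown again even if the wrapped block raises.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(1)  # cursor is hidden while this block runs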
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
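# ---------------------------------------------------------------------------
# Illustrative usage of one of the utilities re-exported above (a hedged
# sketch; consult the accelerate documentation for the authoritative API):
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # retried with a smaller batch size if CUDA runs out of memory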
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def get_basic_setup(accelerator , num_samples=82 , batch_size=16 ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator , use_longest=False ):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
    dataset = load_dataset("glue" , "mrpc" , split="validation" )
    def tokenize_function(examples ):
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
        return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup(dispatch_batches , split_batches ):
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased" , return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model , dataloader , accelerator ):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input )
            logit, target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits, targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics(accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    model, ddp_model, dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits, _ = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}"
def test_mrpc(dispatch_batches = False , split_batches = False ):
    metric = evaluate.load("glue" , "mrpc" )
    setup, accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch["labels"] )
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
            test_torch_metrics(accelerator , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
    accelerator.state._reset_state()
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
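# ---------------------------------------------------------------------------
# Illustrative arithmetic (not library code): why `gather_for_metrics` is
# needed above. With 82 samples sharded over 2 processes at batch size 16,
# naive gathering would return padded duplicates:
def _demo_padded_sample_count(num_samples=82, num_processes=2, batch_size=16):
    per_process = math.ceil(num_samples / num_processes)  # 41
    batches = math.ceil(per_process / batch_size)         # 3
    return batches * batch_size * num_processes           # 96 rows gathered

assert _demo_padded_sample_count() == 96  # gather_for_metrics trims this back to 82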
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        """simple docstring"""
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({"pixel_values": pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        """simple docstring"""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        return True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> dict:
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_loss_computation( self ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(lowerCamelCase_ )
if getattr(lowerCamelCase_ , "hf_compute_loss" , lowerCamelCase_ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase_ , return_labels=lowerCamelCase_ )
UpperCamelCase__ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase_ )[0]
]
UpperCamelCase__ = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase_ , return_labels=lowerCamelCase_ )
UpperCamelCase__ = prepared_for_class.pop("input_ids" )
UpperCamelCase__ = model(lowerCamelCase_ , **lowerCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase_ , return_labels=lowerCamelCase_ )
UpperCamelCase__ = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
UpperCamelCase__ = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ = -1_0_0
UpperCamelCase__ = tf.convert_to_tensor(lowerCamelCase_ )
UpperCamelCase__ = model(lowerCamelCase_ , **lowerCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase_ , return_labels=lowerCamelCase_ )
UpperCamelCase__ = model(lowerCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase_ , return_labels=lowerCamelCase_ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ = inspect.signature(model.call ).parameters
UpperCamelCase__ = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ = {0: "input_ids"}
for label_key in label_keys:
UpperCamelCase__ = signature_names.index(lowerCamelCase_ )
UpperCamelCase__ = label_key
UpperCamelCase__ = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ = prepared_for_class[value]
UpperCamelCase__ = tuple(lowerCamelCase_ )
# Send to model
UpperCamelCase__ = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    def test_for_token_classification( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
    def test_for_question_answering( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase__ ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self :Optional[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase__ = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=lowerCamelCase_ , return_tensors="tf" ).pixel_values
UpperCamelCase__ = tf.constant([[1, 2]] )
UpperCamelCase__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , training=lowerCamelCase_ )
# verify the logits
UpperCamelCase__ = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
UpperCamelCase__ = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1e-4 ) )
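# ---------------------------------------------------------------------------
# Illustrative check (not part of the original tests): the (1, 199, 768) shape
# asserted above follows from LayoutLMv3's sequence layout -- text tokens plus
# (image_size // patch_size) ** 2 image patches plus 1 CLS-like patch token.
# The 224/16 defaults below are assumed from the base checkpoint.
def _demo_layoutlmv3_seq_len(text_len=2, image_size=224, patch_size=16):
    return text_len + (image_size // patch_size) ** 2 + 1

assert _demo_layoutlmv3_seq_len() == 199  # 2 text tokens + 196 patches + 1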
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def snake_case__ ( _snake_case : str , _snake_case : List[str] , _snake_case : List[str] ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ = JsonDatasetReader(_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case ).read()
_check_json_dataset(_snake_case , _snake_case )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def snake_case__ ( _snake_case : List[str] , _snake_case : str , _snake_case : List[str] ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ = features.copy() if features else default_expected_features
UpperCamelCase__ = (
Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ = JsonDatasetReader(_snake_case , features=_snake_case , cache_dir=_snake_case ).read()
_check_json_dataset(_snake_case , _snake_case )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def snake_case__ ( _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Any ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCamelCase__ = features.copy() if features else default_expected_features
UpperCamelCase__ = (
Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ = JsonDatasetReader(_snake_case , features=_snake_case , cache_dir=_snake_case ).read()
assert isinstance(_snake_case , _snake_case )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def snake_case__ ( _snake_case : List[str] , _snake_case : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
UpperCamelCase__ = features.copy()
UpperCamelCase__ = (
Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = JsonDatasetReader(_snake_case , features=_snake_case , cache_dir=_snake_case ).read()
assert isinstance(_snake_case , _snake_case )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def snake_case__ ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Tuple ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ = JsonDatasetReader(_snake_case , cache_dir=_snake_case , split=_snake_case ).read()
_check_json_dataset(_snake_case , _snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def snake_case__ ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
if issubclass(_snake_case , _snake_case ):
UpperCamelCase__ = jsonl_path
elif issubclass(_snake_case , _snake_case ):
UpperCamelCase__ = [jsonl_path]
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ = JsonDatasetReader(_snake_case , cache_dir=_snake_case ).read()
_check_json_dataset(_snake_case , _snake_case )
def snake_case__ ( _snake_case : List[str] , _snake_case : List[Any] , _snake_case : Dict=("train",) ):
"""simple docstring"""
assert isinstance(_snake_case , _snake_case )
for split in splits:
UpperCamelCase__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def snake_case__ ( _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : int ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ = JsonDatasetReader({"train": jsonl_path} , cache_dir=_snake_case , keep_in_memory=_snake_case ).read()
_check_json_datasetdict(_snake_case , _snake_case )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def snake_case__ ( _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ = features.copy() if features else default_expected_features
UpperCamelCase__ = (
Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ = JsonDatasetReader({"train": jsonl_path} , features=_snake_case , cache_dir=_snake_case ).read()
_check_json_datasetdict(_snake_case , _snake_case )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def snake_case__ ( _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Tuple ):
"""simple docstring"""
if split:
UpperCamelCase__ = {split: jsonl_path}
else:
UpperCamelCase__ = "train"
UpperCamelCase__ = {"train": jsonl_path, "test": jsonl_path}
UpperCamelCase__ = tmp_path / "cache"
UpperCamelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase__ = JsonDatasetReader(_snake_case , cache_dir=_snake_case ).read()
_check_json_datasetdict(_snake_case , _snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer ):
    """simple docstring"""
    return json.load(buffer )
def load_json_lines(buffer ):
    """simple docstring"""
    return [json.loads(line ) for line in buffer]
class TestJsonDatasetWriter:
    """Round-trip tests for JsonDatasetWriter."""

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_with_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
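
# --- Hedged illustration (not part of the test suite above) ---
# The same round trip via the public Dataset API; the column values are
# made up here, and `to_json`'s `lines` kwarg is assumed to be forwarded to
# JsonDatasetWriter as in the tests above.
def _json_roundtrip_sketch(tmp_path):
    from datasets import Dataset, load_dataset

    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    out = str(tmp_path / "out.jsonl")
    ds.to_json(out, lines=True)
    reloaded = load_dataset("json", data_files=out, split="train")
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]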
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
        default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string):
        """Parse a 'True'/'False' CLI string into a bool."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
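
# Hedged usage sketch -- the file names below are placeholders, not values
# taken from this script:
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors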
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under preemptive SJF (SRTF)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time

def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is the burst time plus the waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)

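
# Hedged, hand-checked example (not part of the original script): with
# arrival times [0, 1, 2] and burst times [3, 1, 2], SRTF preempts P1 for
# P2 at t=1, finishing the jobs at t=4, t=2 and t=6.
def _srtf_example() -> None:
    assert calculate_waitingtime([0, 1, 2], [3, 1, 2], 3) == [1, 0, 2]
    assert calculate_turnaroundtime([3, 1, 2], 3, [1, 0, 2]) == [4, 1, 4]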
if __name__ == "__main__":
    print("Enter how many processes you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image processor kwargs and synthetic image inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create PIL, NumPy, or PyTorch image inputs with channels-first random data."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs

@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for the standard RGB (3-channel) configuration."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for 4-channel inputs that are converted down to RGB."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from __future__ import annotations
class Node:
    """A binary tree node holding a value and optional left/right children."""

    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    """Depth of a tree: 0 for an empty tree, else 1 + the deeper subtree's depth."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree) -> bool:
    """A tree is full if every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
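
# Hedged illustration (not in the original module): a lone root is full,
# but adding a single child breaks the zero-or-two-children invariant.
def _full_binary_tree_example() -> None:
    root = Node(1)
    assert is_full_binary_tree(root)
    root.left = Node(2)
    assert not is_full_binary_tree(root)
    assert depth_of_tree(root) == 2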

def main():  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
'''simple docstring'''
from __future__ import annotations
sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Primality lookup against the precomputed sieve."""
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if any decimal digit of n is even."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes up to the limit (every digit rotation must be prime)."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Count the circular primes below one million."""
    return len(find_circular_primes())
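
# Hedged check (illustrative, not in the original module): the 13 circular
# primes below 100 -- every rotation of each number is itself prime.
def _circular_primes_example() -> None:
    assert find_circular_primes(100) == [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]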
if __name__ == "__main__":
print(f"{len(find_circular_primes()) = }")
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
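
# Hedged usage sketch (the checkpoint id is an assumption, not taken from
# this file):
#
#     from transformers import BlipProcessor
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=image, text="a photography of", return_tensors="pt")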
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []

def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model and copy `value` into the matching parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')

def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the (possibly wildcarded) ignore patterns."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False

def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every original checkpoint tensor onto the HF EncodecModel."""
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Load the original EnCodec checkpoint and re-save it in the transformers layout."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
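
# Hedged usage sketch -- the checkpoint file matches the download comment at
# the top of this script; the output folder name is a placeholder:
#   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz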
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"

    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
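
# Hedged illustration (not part of the original module): attribute_map lets
# callers read BLOOM's n_layer/n_head under the canonical HF names.
#
#     config = BloomConfig(n_layer=4, n_head=8)
#     assert config.num_hidden_layers == 4 and config.num_attention_heads == 8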
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right cell of a
    0/1 grid, moving in the four cardinal directions without revisiting a cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
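
# Hedged, hand-checked example (not in the original module): an open 2x2
# grid has exactly two simple paths to the goal (right-down and down-right).
def _maze_example() -> None:
    assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2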
if __name__ == "__main__":
import doctest
doctest.testmod()
g = 9.80665  # standard gravity in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force = fluid density * gravity * displaced volume (Archimedes' principle)."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
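
# Hedged worked example (not in the original module): one cubic metre of
# water (997 kg/m^3) displaced at standard gravity gives roughly 9777 N.
def _buoyancy_example() -> None:
    assert round(archimedes_principle(fluid_density=997, volume=1.0), 2) == 9777.23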
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
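
# Hedged illustration (not part of the original module): the default config
# matches Swin-Tiny, whose final channel dimension is 96 * 2**3 = 768.
#
#     config = SwinConfig()
#     assert config.hidden_size == 768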
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
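
# Hedged illustration (not part of the original module): _LazyModule defers
# the torch-backed imports until an attribute is actually accessed, so e.g.
#
#     from transformers.models.nllb_moe import NllbMoeConfig
#
# stays cheap even when the modeling code (and torch) is never needed.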
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature to read image data from an image file.

    Accepts a str path, raw bytes, an np.ndarray, a PIL image, or a dict with
    "path" and/or "bytes" keys.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value) -> dict:
        """Encode an example into a format that Arrow can store."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If decodable, return the feature itself; otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the referenced image files into the Arrow array as bytes."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)

def list_image_compression_formats() -> List[str]:
    """Lazily compute the image formats Pillow can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}

def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}

def lowerCAmelCase_ ( __UpperCAmelCase: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCamelCase__ ,UpperCamelCase__ : List[str] = first_non_null_value(__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__UpperCAmelCase , np.ndarray ):
UpperCamelCase__ : List[Any] = no_op_if_value_is_null(__UpperCAmelCase )
return [obj_to_image_dict_func(__UpperCAmelCase ) for obj in objs]
elif isinstance(__UpperCAmelCase , PIL.Image.Image ):
UpperCamelCase__ : Tuple = no_op_if_value_is_null(__UpperCAmelCase )
return [obj_to_image_dict_func(__UpperCAmelCase ) for obj in objs]
else:
return objs
else:
return objs
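

# A minimal usage sketch (added for illustration, not part of the original
# module). It only uses `encode_np_array` defined above and assumes Pillow is
# installed; a (height, width, channels) uint8 array is one of the inputs
# Pillow accepts directly.
if __name__ == "__main__":
    demo_array = np.zeros((4, 4, 3), dtype=np.uint8)
    encoded = encode_np_array(demo_array)
    # the Image feature stores either raw bytes or a file path, never both
    assert encoded["path"] is None and isinstance(encoded["bytes"], bytes)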
def matching_min_vertex_cover(graph: dict) -> set:
    """APX algorithm for minimum vertex cover: repeatedly pick an edge and keep both endpoints."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of (from_node, to_node) couples for every directed edge in the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
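
    # Added runnable demo (a sketch mirroring the commented example above): the
    # matching heuristic returns a vertex cover, not necessarily a minimum one,
    # because it keeps both endpoints of every matched edge.
    demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = matching_min_vertex_cover(demo_graph)
    # every edge must have at least one endpoint inside the cover
    assert all(u in cover or v in cover for u, v in get_edges(demo_graph))
    print(f"Matching vertex cover:\n{cover}")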
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # the response threshold can be tuned
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
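

# Added cross-check sketch (not in the original): OpenCV's built-in Harris
# detector computes the same response map R = det(M) - k * trace(M)**2, so it
# can be used to sanity-check the pure-NumPy loop in `HarrisCorner.detect`.
def opencv_harris_response(img_path: str, k: float = 0.04, block_size: int = 3) -> np.ndarray:
    gray = np.float32(cv2.imread(img_path, 0))
    # the third argument is the Sobel aperture size; block_size matches window_size above
    return cv2.cornerHarris(gray, block_size, 3, k)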
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
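

# Added sketch of the contract this launcher expects (hypothetical example, not
# part of the tool): the target training script must expose a module-level
# `_mp_fn(index)`, which `xmp.spawn` calls once per TPU process with the
# process index; real scripts would parse the patched sys.argv there.
def _example_mp_fn(index):
    print(f"hello from TPU process {index}", flush=True)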
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=32 , __a=3 , __a=4 , __a=[10, 20, 30, 40] , __a=[2, 2, 3, 2] , __a=True , __a=True , __a=37 , __a="gelu" , __a=10 , __a=0.0_2 , __a=["stage2", "stage3", "stage4"] , __a=[2, 3, 4] , __a=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_stages
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = initializer_range
__lowerCAmelCase = out_features
__lowerCAmelCase = out_indices
__lowerCAmelCase = scope
def snake_case ( self ):
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = ConvNextModel(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowerCAmelCase = None
__lowerCAmelCase = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : str =(
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : List[str] =True
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Tuple =False
__UpperCAmelCase : List[str] =False
__UpperCAmelCase : Tuple =False
def snake_case ( self ):
__lowerCAmelCase = ConvNextModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def snake_case ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def snake_case ( self ):
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def snake_case ( self ):
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def snake_case ( self ):
pass
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def snake_case ( self ):
def check_hidden_states_output(__a , __a , __a ):
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(__a , __a , __a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def snake_case ( self ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case ( self ):
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def snake_case ( self ):
__lowerCAmelCase = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(__a )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__a )
# verify the logits
__lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __a )
__lowerCAmelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ,lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =(ConvNextBackbone,) if is_torch_available() else ()
__UpperCAmelCase : Dict =ConvNextConfig
__UpperCAmelCase : List[str] =False
def snake_case ( self ):
__lowerCAmelCase = ConvNextModelTester(self )
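

# Added usage sketch (not part of the test suite): building a randomly
# initialised ConvNextBackbone from a config and reading out one feature map
# per requested stage; like the tests above, it requires torch.
def _backbone_demo():
    config = ConvNextConfig(out_features=["stage2", "stage3", "stage4"])
    backbone = ConvNextBackbone(config)
    pixel_values = torch.randn(1, config.num_channels, 32, 32)
    outputs = backbone(pixel_values)
    # one feature map per requested stage, with channel count growing per stage
    return [tuple(feature_map.shape) for feature_map in outputs.feature_maps]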
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=10 , __a=3 , __a=2 , __a=2 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.0_2 , __a="divided_space_time" , __a=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_frames
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = attention_type
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = (num_frames) * self.num_patches_per_frame + 1
def snake_case ( self ):
__lowerCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
__lowerCAmelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__lowerCAmelCase = self.num_labels
return config
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TimesformerModel(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TimesformerForVideoClassification(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
# verify the logits shape
__lowerCAmelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __a )
def snake_case ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
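

# Added worked example of the sequence-length arithmetic above: with the tester
# defaults (image_size=10, patch_size=2, num_frames=2), each frame contributes
# (10 // 2) ** 2 = 25 patch tokens, so the sequence length is 2 * 25 + 1 = 51
# once the CLS token is added.
assert 2 * (10 // 2) ** 2 + 1 == 51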
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


# We will verify our results on a video of eating spaghetti
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowercase ( snake_case_ : Optional[Any] ,snake_case_ : bool = True ,snake_case_ : float = math.inf ,snake_case_ : float = -math.inf ,snake_case_ : float = math.inf ,snake_case_ : float = -math.inf ,snake_case_ : bool = False ,snake_case_ : float = 100 ,snake_case_ : float = 0.01 ,snake_case_ : float = 1 ,) ->Any:
'''simple docstring'''
__A : int = False
__A : int = search_prob
__A : List[str] = start_temperate
__A : List[Any] = []
__A : Optional[Any] = 0
__A : Optional[Any] = None
while not search_end:
__A : Tuple = current_state.score()
if best_state is None or current_score > best_state.score():
__A : Any = current_state
scores.append(snake_case_ )
iterations += 1
__A : List[Any] = None
__A : Any = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
__A : str = random.randint(0 ,len(snake_case_ ) - 1 ) # picking a random neighbor
__A : Dict = neighbors.pop(snake_case_ )
__A : Optional[int] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__A : str = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__A : Optional[Any] = picked_neighbor
else:
__A : str = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__A : List[str] = picked_neighbor
__A : Optional[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__A : Union[str, Any] = True
else:
__A : List[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(snake_case_ ) ,snake_case_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def __lowercase ( snake_case_ : Any ,snake_case_ : Optional[int] ) ->Tuple:
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
a_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def __lowercase ( snake_case_ : Union[str, Any] ,snake_case_ : List[str] ) ->Union[str, Any]:
'''simple docstring'''
return (3 * x**2) - (6 * y)
a_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f'''{local_min.score()}'''
)
a_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f'''{local_min.score()}'''
)
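
    # Added worked example of the Metropolis acceptance rule used above: a
    # worsening move with change = -3 is accepted with probability
    # e**(change / T), i.e. about 0.970 at T=100 but only about 0.050 at T=1.
    print(f"p(accept | change=-3, T=100) = {math.e ** (-3 / 100):.3f}")
    print(f"p(accept | change=-3, T=1)   = {math.e ** (-3 / 1):.3f}")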
"""simple docstring"""
def __lowercase ( snake_case_ : list ) ->float:
'''simple docstring'''
__A : Tuple = 0
while len(snake_case_ ) > 1:
__A : List[Any] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
__A : Dict = files.index(min(snake_case_ ) )
temp += files[min_index]
files.pop(snake_case_ )
files.append(snake_case_ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
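
    # Added worked trace of the doctest above: merging [2, 3, 4] first combines
    # 2 + 3 = 5 (cost 5), leaving [4, 5]; then 4 + 5 = 9 (cost 9), so the total
    # optimal merge cost is 5 + 9 = 14.
    print(optimal_merge_pattern([2, 3, 4]))  # -> 14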
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Loads an ONNX Inference session with a given provider; defaults to CPUExecutionProvider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    r"""
    Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
    processor.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # for backward compatibility, forward everything while inside the target processor context
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
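

# Added usage sketch (not part of the module; "openai/whisper-tiny" is a real
# checkpoint, `waveform` is a placeholder 16 kHz float array):
#
#     from transformers import WhisperProcessor
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     batch = processor(audio=waveform, sampling_rate=16000, text="hello world")
#     # `batch` holds the audio `input_features`; because `text` was passed,
#     # the tokenized transcript is attached under `labels`, as in __call__ above.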
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
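

# Added reference sketch (an assumption inferred from the three fit-to-block
# tests above, not the actual `utils_summarization` implementation): any
# implementation consistent with those tests behaves like this.
def _truncate_or_pad_reference(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))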
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Calculate any one of voltage, current, or power, given the other two; the
    unknown quantity must be passed as 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
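
    # Added worked examples: exactly one of the three quantities is passed as 0
    # and the function solves for it using P = V * I.
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
    print(electric_power(voltage=2, current=4, power=0))  # result(name='power', value=8.0)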
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
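

# Added usage sketch: `get_obj_from_str` resolves a dotted import path to an
# object, which is how `instantiate_from_config` turns a config's `target`
# entry into a class before calling it with `params`.
if __name__ == "__main__":
    ordered_dict_cls = get_obj_from_str("collections.OrderedDict")
    assert ordered_dict_cls.__name__ == "OrderedDict"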
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """
    Creates a set of `DataLoader`s for the `glue` dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
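
# Added sketch of inspecting the per-epoch state files this script writes
# (hypothetical output path): each `state_{epoch}.json` stores the values that
# the resume branch re-validates with assertions above.
#
#     import json
#     with open("outputs/state_0.json") as f:
#         print(json.load(f))
#     # {"accuracy": ..., "lr": ..., "optimizer_lr": ..., "epoch": 0, "overall_step": ...}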
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: List[str] =SamImageProcessor()
SCREAMING_SNAKE_CASE_: str =SamProcessor(lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Optional[Any] , **lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_: str =[Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_: List[str] =self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.get_image_processor()
SCREAMING_SNAKE_CASE_: List[str] =SamProcessor(image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: Any =image_processor(lowerCAmelCase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE_: List[str] =processor(images=lowerCAmelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.get_image_processor()
SCREAMING_SNAKE_CASE_: List[str] =SamProcessor(image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =[torch.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[[1764, 2646]]
SCREAMING_SNAKE_CASE_: Any =[[683, 1024]]
SCREAMING_SNAKE_CASE_: Dict =processor.post_process_masks(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
SCREAMING_SNAKE_CASE_: Union[str, Any] =processor.post_process_masks(
lowerCAmelCase , torch.tensor(lowerCAmelCase ) , torch.tensor(lowerCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
SCREAMING_SNAKE_CASE_: int =[np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE_: Optional[Any] =processor.post_process_masks(lowerCAmelCase , np.array(lowerCAmelCase ) , np.array(lowerCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
SCREAMING_SNAKE_CASE_: Dict =[[1, 0], [0, 1]]
with self.assertRaises(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: str =processor.post_process_masks(lowerCAmelCase , np.array(lowerCAmelCase ) , np.array(lowerCAmelCase ) )
@require_vision
@require_tf
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: str =SamImageProcessor()
SCREAMING_SNAKE_CASE_: Tuple =SamProcessor(lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : List[str] , **lowerCAmelCase : Any ) -> str:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE_: Optional[Any] =[Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_: List[Any] =self.get_image_processor(do_normalize=False , padding_value=1.0 )
SCREAMING_SNAKE_CASE_: List[Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.get_image_processor()
SCREAMING_SNAKE_CASE_: Tuple =SamProcessor(image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: List[str] =image_processor(lowerCAmelCase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE_: Tuple =processor(images=lowerCAmelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.get_image_processor()
SCREAMING_SNAKE_CASE_: Optional[int] =SamProcessor(image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =[tf.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE_: Optional[int] =[[1764, 2646]]
SCREAMING_SNAKE_CASE_: List[Any] =[[683, 1024]]
SCREAMING_SNAKE_CASE_: Union[str, Any] =processor.post_process_masks(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
SCREAMING_SNAKE_CASE_: Optional[Any] =processor.post_process_masks(
lowerCAmelCase , tf.convert_to_tensor(lowerCAmelCase ) , tf.convert_to_tensor(lowerCAmelCase ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
SCREAMING_SNAKE_CASE_: List[str] =[np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE_: List[Any] =processor.post_process_masks(
lowerCAmelCase , np.array(lowerCAmelCase ) , np.array(lowerCAmelCase ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
SCREAMING_SNAKE_CASE_: Dict =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
SCREAMING_SNAKE_CASE_: Any =processor.post_process_masks(
lowerCAmelCase , np.array(lowerCAmelCase ) , np.array(lowerCAmelCase ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: int =SamImageProcessor()
SCREAMING_SNAKE_CASE_: List[str] =SamProcessor(lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : str , **lowerCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE_: Optional[int] =[Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.get_image_processor()
SCREAMING_SNAKE_CASE_: Dict =SamProcessor(image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
SCREAMING_SNAKE_CASE_: str =[tf.convert_to_tensor(lowerCAmelCase )]
SCREAMING_SNAKE_CASE_: str =[torch.tensor(lowerCAmelCase )]
SCREAMING_SNAKE_CASE_: Tuple =[[1764, 2646]]
SCREAMING_SNAKE_CASE_: Tuple =[[683, 1024]]
SCREAMING_SNAKE_CASE_: Any =processor.post_process_masks(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE_: Tuple =processor.post_process_masks(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.get_image_processor()
SCREAMING_SNAKE_CASE_: Dict =SamProcessor(image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: List[Any] =image_processor(lowerCAmelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
SCREAMING_SNAKE_CASE_: List[str] =processor(images=lowerCAmelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
SCREAMING_SNAKE_CASE_: Tuple =image_processor(lowerCAmelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
SCREAMING_SNAKE_CASE_: int =processor(images=lowerCAmelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
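# --- Usage sketch (added): SamProcessor mask post-processing ---
# A minimal, hedged sketch of the API the tests above exercise, assuming the
# public "facebook/sam-vit-base" checkpoint; the argument order follows the
# post_process_masks calls in the tests.
def _sam_post_process_demo():
    import torch
    from transformers import SamProcessor

    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    low_res_masks = [torch.ones((1, 3, 256, 256))]  # dummy low-resolution mask logits
    original_sizes = [[1764, 2646]]  # (height, width) of the source image
    reshaped_input_sizes = [[683, 1024]]  # size after the processor's resize step
    masks = processor.post_process_masks(low_res_masks, original_sizes, reshaped_input_sizes)
    # masks are upscaled back to the original image resolution
    assert masks[0].shape == (1, 3, 1764, 2646)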
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : int = 't5'
UpperCamelCase : int = ['past_key_values']
UpperCamelCase : Optional[Any] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : List[str] , lowerCAmelCase : str=3_2128 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : str=64 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : Union[str, Any]=6 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Any=32 , lowerCAmelCase : str=128 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Optional[Any]=1E-6 , lowerCAmelCase : Dict=1.0 , lowerCAmelCase : Optional[Any]="relu" , lowerCAmelCase : List[str]=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Union[str, Any]=0 , lowerCAmelCase : str=1 , **lowerCAmelCase : Union[str, Any] , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =d_model
SCREAMING_SNAKE_CASE_: int =d_kv
SCREAMING_SNAKE_CASE_: Any =d_ff
SCREAMING_SNAKE_CASE_: Dict =num_layers
SCREAMING_SNAKE_CASE_: Optional[Any] =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE_: int =num_heads
SCREAMING_SNAKE_CASE_: Optional[Any] =relative_attention_num_buckets
SCREAMING_SNAKE_CASE_: List[Any] =relative_attention_max_distance
SCREAMING_SNAKE_CASE_: List[str] =dropout_rate
SCREAMING_SNAKE_CASE_: List[str] =layer_norm_epsilon
SCREAMING_SNAKE_CASE_: List[Any] =initializer_factor
SCREAMING_SNAKE_CASE_: Tuple =feed_forward_proj
SCREAMING_SNAKE_CASE_: Union[str, Any] =use_cache
SCREAMING_SNAKE_CASE_: Optional[Any] =self.feed_forward_proj.split("""-""" )
SCREAMING_SNAKE_CASE_: Dict =act_info[-1]
SCREAMING_SNAKE_CASE_: Dict =act_info[0] == """gated"""
if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE_: int ="""gelu_new"""
super().__init__(
pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase , )
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict ={
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
SCREAMING_SNAKE_CASE_: Tuple ="""past_encoder_sequence + sequence"""
SCREAMING_SNAKE_CASE_: Dict ={0: """batch"""}
SCREAMING_SNAKE_CASE_: Optional[Any] ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] ={0: """batch""", 1: """decoder_sequence"""}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction="""inputs""" )
return common_inputs
@property
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return 13
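# --- Usage sketch (added): feed_forward_proj parsing in T5Config ---
# Hedged illustration of the activation parsing above; the attribute names
# dense_act_fn / is_gated_act are the upstream ones that the mangled
# assignments in __init__ correspond to.
def _t5_config_demo():
    from transformers import T5Config

    cfg = T5Config(feed_forward_proj="gated-gelu")
    assert cfg.is_gated_act is True
    assert cfg.dense_act_fn == "gelu_new"  # remapped for backwards compatibility
    cfg = T5Config(feed_forward_proj="relu")  # a bare activation name also works
    assert cfg.is_gated_act is False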
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( a_, unittest.TestCase ):
UpperCAmelCase_ : Any =KandinskyInpaintPipeline
UpperCAmelCase_ : Union[str, Any] =["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase_ : Tuple =[
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase_ : Union[str, Any] =[
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase_ : List[str] =False
@property
def UpperCamelCase_ ( self ):
return 3_2
@property
def UpperCamelCase_ ( self ):
return 3_2
@property
def UpperCamelCase_ ( self ):
return self.time_input_dim
@property
def UpperCamelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self ):
return 1_0_0
@property
def UpperCamelCase_ ( self ):
lowercase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
lowercase = MultilingualCLIP(_lowerCamelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowercase = UNet2DConditionModel(**_lowerCamelCase )
return model
@property
def UpperCamelCase_ ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase_ ( self ):
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
lowercase = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase=0 ):
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uint8(_lowerCamelCase ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
lowercase = np.ones((6_4, 6_4) , dtype=np.float32 )
lowercase = 0
if str(_lowerCamelCase ).startswith('mps' ):
lowercase = torch.manual_seed(_lowerCamelCase )
else:
lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowercase = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def UpperCamelCase_ ( self ):
lowercase = 'cpu'
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**_lowerCamelCase )
lowercase = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=False , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 6_4, 6_4, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def UpperCamelCase_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ):
lowercase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowercase = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
lowercase = 0
lowercase = 'a hat'
lowercase = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.float16 )
pipe_prior.to(torch_device )
lowercase = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.float16 )
lowercase = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
lowercase = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowercase = pipeline(
_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='np' , )
lowercase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
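# --- Usage sketch (added): two-stage Kandinsky inpainting ---
# Hedged outline of the flow the slow test above exercises: the prior
# pipeline maps the prompt to image embeddings, which the inpaint pipeline
# consumes together with an init image and a mask. Checkpoint names are the
# public kandinsky-community ones already used above.
def _kandinsky_inpaint_demo(init_image, mask):
    from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline

    prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
    image_embeds, negative_image_embeds = prior(
        "a hat", num_inference_steps=5, negative_prompt=""
    ).to_tuple()
    pipe = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint")
    out = pipe(
        "a hat",
        image=init_image,
        mask_image=mask,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        num_inference_steps=25,
        output_type="np",
    )
    return out.images[0]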
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase : str = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
_UpperCamelCase : str = {
'facebook/nllb-large-en-ro': 1_0_2_4,
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
_UpperCamelCase : Tuple = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class a ( a_ ):
UpperCAmelCase_ : int =VOCAB_FILES_NAMES
UpperCAmelCase_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : int =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Tuple =["input_ids", "attention_mask"]
UpperCAmelCase_ : Any =NllbTokenizer
UpperCAmelCase_ : List[int] =[]
UpperCAmelCase_ : List[int] =[]
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=False , **_lowerCamelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
lowercase = AddedToken(_lowerCamelCase , lstrip=True , rstrip=False ) if isinstance(_lowerCamelCase , str ) else mask_token
lowercase = legacy_behaviour
super().__init__(
vocab_file=_lowerCamelCase , tokenizer_file=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , legacy_behaviour=_lowerCamelCase , **_lowerCamelCase , )
lowercase = vocab_file
lowercase = False if not self.vocab_file else True
lowercase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowercase = {
lang_code: self.convert_tokens_to_ids(_lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase = src_lang if src_lang is not None else 'eng_Latn'
lowercase = self.convert_tokens_to_ids(self._src_lang )
lowercase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase_ ( self ):
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowercase = src_lang
lowercase = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
lowercase = self.convert_tokens_to_ids(_lowerCamelCase )
lowercase = tgt_lang_id
return inputs
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = "eng_Latn" , _lowerCamelCase = None , _lowerCamelCase = "fra_Latn" , **_lowerCamelCase , ):
lowercase = src_lang
lowercase = tgt_lang
return super().prepare_seq2seq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = self.convert_tokens_to_ids(_lowerCamelCase )
if self.legacy_behaviour:
lowercase = []
lowercase = [self.eos_token_id, self.cur_lang_code]
else:
lowercase = [self.cur_lang_code]
lowercase = [self.eos_token_id]
lowercase = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = self.convert_tokens_to_ids(_lowerCamelCase )
if self.legacy_behaviour:
lowercase = []
lowercase = [self.eos_token_id, self.cur_lang_code]
else:
lowercase = [self.cur_lang_code]
lowercase = [self.eos_token_id]
lowercase = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file , _lowerCamelCase )
return (out_vocab_file,)
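# --- Usage sketch (added): NLLB language-code handling ---
# Minimal, hedged example of the src_lang/tgt_lang machinery above; the
# checkpoint is the distilled 600M model referenced in the vocab maps, and in
# the default (non-legacy) mode the source language code is prepended to the
# sequence by set_src_lang_special_tokens.
def _nllb_tokenizer_demo():
    from transformers import NllbTokenizerFast

    tok = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    enc = tok("UN Chief says there is no military solution in Syria")
    tokens = tok.convert_ids_to_tokens(enc["input_ids"])
    assert tokens[0] == "eng_Latn"  # language code injected as a prefix token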
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCAmelCase_ ( lowercase: Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase: List[Any] = VideoMAEConfig()
set_architecture_configs(lowercase , lowercase )
if "finetuned" not in model_name:
_UpperCamelCase: str = False
if "finetuned" in model_name:
_UpperCamelCase: Dict = '''huggingface/label-files'''
if "kinetics" in model_name:
_UpperCamelCase: Union[str, Any] = 400
_UpperCamelCase: Any = '''kinetics400-id2label.json'''
elif "ssv2" in model_name:
_UpperCamelCase: Dict = 174
_UpperCamelCase: Dict = '''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
_UpperCamelCase: List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type='''dataset''' ) , '''r''' ) )
_UpperCamelCase: Tuple = {int(k ): v for k, v in id2label.items()}
_UpperCamelCase: Any = id2label
_UpperCamelCase: str = {v: k for k, v in id2label.items()}
return config
def lowerCAmelCase_ ( lowercase: Optional[int] , lowercase: str ) -> Optional[int]:
'''simple docstring'''
if "small" in model_name:
_UpperCamelCase: List[Any] = 384
_UpperCamelCase: List[str] = 1_536
_UpperCamelCase: Tuple = 12
_UpperCamelCase: List[Any] = 16
_UpperCamelCase: Dict = 12
_UpperCamelCase: Any = 3
_UpperCamelCase: str = 192
_UpperCamelCase: Any = 768
elif "large" in model_name:
_UpperCamelCase: int = 1_024
_UpperCamelCase: Optional[Any] = 4_096
_UpperCamelCase: List[Any] = 24
_UpperCamelCase: Union[str, Any] = 16
_UpperCamelCase: Tuple = 12
_UpperCamelCase: List[Any] = 8
_UpperCamelCase: Tuple = 512
_UpperCamelCase: Optional[Any] = 2_048
elif "huge" in model_name:
_UpperCamelCase: int = 1_280
_UpperCamelCase: List[str] = 5_120
_UpperCamelCase: Dict = 32
_UpperCamelCase: List[str] = 16
_UpperCamelCase: Optional[int] = 12
_UpperCamelCase: str = 8
_UpperCamelCase: Any = 640
_UpperCamelCase: Optional[Any] = 2_560
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def lowerCAmelCase_ ( lowercase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if "encoder." in name:
_UpperCamelCase: List[Any] = name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
_UpperCamelCase: Union[str, Any] = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
_UpperCamelCase: Optional[int] = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
_UpperCamelCase: Dict = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
_UpperCamelCase: Union[str, Any] = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_UpperCamelCase: List[Any] = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
_UpperCamelCase: str = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
_UpperCamelCase: Optional[int] = name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
_UpperCamelCase: int = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
_UpperCamelCase: Dict = name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
_UpperCamelCase: str = name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
_UpperCamelCase: Optional[Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_UpperCamelCase: Optional[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_UpperCamelCase: Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_UpperCamelCase: Any = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
_UpperCamelCase: List[str] = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
_UpperCamelCase: List[str] = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
_UpperCamelCase: Any = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_UpperCamelCase: Dict = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_UpperCamelCase: List[Any] = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
_UpperCamelCase: Optional[int] = name.replace('''head''' , '''classifier''' )
return name
def lowerCAmelCase_ ( lowercase: List[str] , lowercase: Any ) -> Optional[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCamelCase: List[Any] = orig_state_dict.pop(lowercase )
if key.startswith('''encoder.''' ):
_UpperCamelCase: Optional[Any] = key.replace('''encoder.''' , '''''' )
if "qkv" in key:
_UpperCamelCase: Union[str, Any] = key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
_UpperCamelCase: Union[str, Any] = config.decoder_hidden_size
_UpperCamelCase: Optional[int] = int(key_split[2] )
_UpperCamelCase: Any = '''decoder.decoder_layers.'''
if "weight" in key:
_UpperCamelCase: Union[str, Any] = val[:dim, :]
_UpperCamelCase: Optional[int] = val[dim : dim * 2, :]
_UpperCamelCase: Tuple = val[-dim:, :]
else:
_UpperCamelCase: Any = config.hidden_size
_UpperCamelCase: int = int(key_split[1] )
_UpperCamelCase: Tuple = '''videomae.encoder.layer.'''
if "weight" in key:
_UpperCamelCase: List[Any] = val[:dim, :]
_UpperCamelCase: Tuple = val[dim : dim * 2, :]
_UpperCamelCase: List[Any] = val[-dim:, :]
else:
_UpperCamelCase: Union[str, Any] = val
return orig_state_dict
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCamelCase: Tuple = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
_UpperCamelCase: List[Any] = np.load(lowercase )
return list(lowercase )
def lowerCAmelCase_ ( lowercase: Tuple , lowercase: List[str] , lowercase: Dict , lowercase: str ) -> int:
'''simple docstring'''
_UpperCamelCase: Optional[Any] = get_videomae_config(lowercase )
if "finetuned" in model_name:
_UpperCamelCase: Dict = VideoMAEForVideoClassification(lowercase )
else:
_UpperCamelCase: int = VideoMAEForPreTraining(lowercase )
# download original checkpoint, hosted on Google Drive
_UpperCamelCase: List[Any] = '''pytorch_model.bin'''
gdown.cached_download(lowercase , lowercase , quiet=lowercase )
_UpperCamelCase: Any = torch.load(lowercase , map_location='''cpu''' )
if "model" in files:
_UpperCamelCase: Optional[Any] = files['''model''']
else:
_UpperCamelCase: Optional[Any] = files['''module''']
_UpperCamelCase: Any = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
# verify model on basic input
_UpperCamelCase: Optional[Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_UpperCamelCase: Optional[int] = prepare_video()
_UpperCamelCase: int = image_processor(lowercase , return_tensors='''pt''' )
if "finetuned" not in model_name:
_UpperCamelCase: str = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
_UpperCamelCase: Optional[int] = torch.load(lowercase )
_UpperCamelCase: List[Any] = model(**lowercase )
_UpperCamelCase: Any = outputs.logits
_UpperCamelCase: Optional[Any] = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_UpperCamelCase: List[str] = torch.Size([1, 400] )
_UpperCamelCase: Dict = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_UpperCamelCase: str = torch.Size([1, 174] )
_UpperCamelCase: str = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_UpperCamelCase: Any = torch.Size([1, 1_408, 1_536] )
_UpperCamelCase: str = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_UpperCamelCase: Dict = torch.Size([1, 1_408, 1_536] )
_UpperCamelCase: Optional[int] = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_UpperCamelCase: str = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_UpperCamelCase: str = torch.Size([1, 1_408, 1_536] )
_UpperCamelCase: int = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_UpperCamelCase: Dict = torch.Size([1, 400] )
_UpperCamelCase: Dict = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_UpperCamelCase: Tuple = torch.Size([1, 400] )
_UpperCamelCase: Optional[int] = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_UpperCamelCase: Dict = torch.Size([1, 400] )
_UpperCamelCase: str = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_UpperCamelCase: int = torch.Size([1, 400] )
_UpperCamelCase: List[str] = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_UpperCamelCase: str = torch.Size([1, 1_408, 1_536] )
_UpperCamelCase: List[str] = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_UpperCamelCase: str = torch.Size([1, 174] )
_UpperCamelCase: List[Any] = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_UpperCamelCase: str = torch.Size([1, 1_408, 1_536] )
_UpperCamelCase: int = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_UpperCamelCase: Tuple = torch.Size([1, 174] )
_UpperCamelCase: Optional[Any] = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowercase , atol=1E-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1E-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_UpperCamelCase: List[Any] = outputs.loss
assert torch.allclose(lowercase , lowercase , atol=1E-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase )
model.save_pretrained(lowercase )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(lowercase , organization='''nielsr''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
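# --- Usage sketch (added): running the conversion script ---
# Illustrative invocation, assuming the file is saved as
# convert_videomae_checkpoint.py; the checkpoint URL defaults to the Google
# Drive link declared in the argparse setup above.
#
#   python convert_videomae_checkpoint.py \
#       --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base \
#       --push_to_hub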
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase_ ( lowercase: str = "" ) -> dict[str, float]:
'''simple docstring'''
_UpperCamelCase: Tuple = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
_UpperCamelCase: Union[str, Any] = BeautifulSoup(requests.get(lowercase ).text , '''html.parser''' )
_UpperCamelCase: List[Any] = soup.find_all('''td''' , attrs='''titleColumn''' )
_UpperCamelCase: str = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowercase , lowercase )
}
def lowerCAmelCase_ ( lowercase: str = "IMDb_Top_250_Movies.csv" ) -> None:
'''simple docstring'''
_UpperCamelCase: Any = get_imdb_top_250_movies()
with open(lowercase , '''w''' , newline='''''' ) as out_file:
_UpperCamelCase: Optional[Any] = csv.writer(lowercase )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
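# --- Smoke test (added): CSV writing without the network ---
# The IMDb markup changes over time, so the titleColumn/ratingColumn selectors
# above are best-effort; this hedged helper exercises only the CSV-writing
# half with canned data.
def _write_movies_smoke_test(path: str = "movies_smoke.csv") -> None:
    movies = {"The Shawshank Redemption": 9.2, "The Godfather": 9.2}
    with open(path, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])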
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase = "▁" , _lowerCAmelCase = True , _lowerCAmelCase = "<unk>" , _lowerCAmelCase = "</s>" , _lowerCAmelCase = "<pad>" , ):
lowerCamelCase__ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
lowerCamelCase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCamelCase__ = token_dict["token"]
lowerCamelCase__ = Tokenizer(Unigram() )
lowerCamelCase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
lowerCamelCase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ),
pre_tokenizers.Punctuation(),
] )
lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = TemplateProcessing(
single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
lowerCamelCase__ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __magic_name__ ( self , _lowerCAmelCase , _lowerCAmelCase = 8000 , _lowerCAmelCase = True , ):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase , special_tokens=self.special_tokens_list , show_progress=_lowerCAmelCase , )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = [files]
self._tokenizer.train(_lowerCAmelCase , trainer=_lowerCAmelCase )
self.add_unk_id()
def __magic_name__ ( self , _lowerCAmelCase , _lowerCAmelCase = 8000 , _lowerCAmelCase = True , ):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase , special_tokens=self.special_tokens_list , show_progress=_lowerCAmelCase , )
self._tokenizer.train_from_iterator(_lowerCAmelCase , trainer=_lowerCAmelCase )
self.add_unk_id()
def __magic_name__ ( self ):
lowerCamelCase__ = json.loads(self._tokenizer.to_str() )
lowerCamelCase__ = self.special_tokens["unk"]["id"]
lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
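# --- Usage sketch (added): training the Unigram tokenizer ---
# Hedged demo of the iterator-based training path above; the class is known
# upstream as SentencePieceUnigramTokenizer (names here are mangled), and
# vocab_size must stay below the number of candidate pieces the trainer can
# extract from the corpus.
def _unigram_training_demo():
    corpus = ["low lower newest widest", "the quick brown fox jumps"] * 200
    tokenizer = SentencePieceUnigramTokenizer()  # assumed upstream class name
    tokenizer.train_from_iterator(corpus, vocab_size=60, show_progress=False)
    print(tokenizer.encode("the lowest fox").tokens)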
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A_ = get_logger(__name__)
A_ = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
for processor in self:
lowerCamelCase__ = inspect.signature(processor.__call__ ).parameters
if len(_lowerCAmelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"Make sure that all the required parameters: {list(function_args.keys() )} for "
F"{processor.__class__} are passed to the logits processor." )
lowerCamelCase__ = processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
else:
lowerCamelCase__ = processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not (temperature > 0):
raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}" )
lowerCamelCase__ = temperature
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = scores / self.temperature
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = -float("Inf" ) , _lowerCAmelCase = 1 ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or (min_tokens_to_keep < 1):
raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
lowerCamelCase__ = top_p
lowerCamelCase__ = filter_value
lowerCamelCase__ = min_tokens_to_keep
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = lax.top_k(_lowerCAmelCase , scores.shape[-1] )
lowerCamelCase__ = jnp.full_like(_lowerCAmelCase , self.filter_value )
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase , axis=-1 ).cumsum(axis=-1 )
lowerCamelCase__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowerCamelCase__ = jnp.roll(_lowerCAmelCase , 1 )
score_mask |= score_mask.at[:, 0].set(_lowerCAmelCase )
# min tokens to keep
lowerCamelCase__ = score_mask.at[:, : self.min_tokens_to_keep].set(_lowerCAmelCase )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jax.lax.sort_key_val(_lowerCAmelCase , _lowerCAmelCase )[-1]
return next_scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = -float("Inf" ) , _lowerCAmelCase = 1 ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or top_k <= 0:
raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}" )
lowerCamelCase__ = max(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = filter_value
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = scores.shape
lowerCamelCase__ = jnp.full(batch_size * vocab_size , self.filter_value )
lowerCamelCase__ = min(self.top_k , scores.shape[-1] ) # Safety check
lowerCamelCase__ , lowerCamelCase__ = lax.top_k(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.broadcast_to((jnp.arange(_lowerCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowerCamelCase__ = topk_scores.flatten()
lowerCamelCase__ = topk_indices.flatten() + shift
lowerCamelCase__ = next_scores_flat.at[topk_indices_flat].set(_lowerCAmelCase )
lowerCamelCase__ = next_scores_flat.reshape(_lowerCAmelCase , _lowerCAmelCase )
return next_scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = bos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.full(scores.shape , -float("inf" ) )
lowerCamelCase__ = 1 - jnp.bool_(cur_len - 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = max_length
lowerCamelCase__ = eos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.full(scores.shape , -float("inf" ) )
lowerCamelCase__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or min_length < 0:
raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or eos_token_id < 0:
raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
lowerCamelCase__ = min_length
lowerCamelCase__ = eos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# create boolean flag to decide if min length penalty should be applied
lowerCamelCase__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = list(_lowerCAmelCase )
lowerCamelCase__ = begin_index
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = 1 - jnp.bool_(cur_len - self.begin_index )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = list(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = dict(_lowerCAmelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
lowerCamelCase__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
lowerCamelCase__ = force_token_array.at[index].set(_lowerCAmelCase )
lowerCamelCase__ = jnp.int32(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
def _force_token(_lowerCAmelCase ):
lowerCamelCase__ = scores.shape[0]
lowerCamelCase__ = self.force_token_array[generation_idx]
lowerCamelCase__ = jnp.ones_like(_lowerCAmelCase , dtype=scores.dtype ) * -float("inf" )
lowerCamelCase__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowerCamelCase__ = lax.dynamic_update_slice(_lowerCAmelCase , _lowerCAmelCase , (0, current_token) )
return new_scores
lowerCamelCase__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_lowerCAmelCase ) , lambda: scores , ) , )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = generate_config.eos_token_id
lowerCamelCase__ = generate_config.no_timestamps_token_id
lowerCamelCase__ = generate_config.no_timestamps_token_id + 1
lowerCamelCase__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_lowerCAmelCase , "max_initial_timestamp_index" ):
lowerCamelCase__ = generate_config.max_initial_timestamp_index
else:
lowerCamelCase__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowerCamelCase__ = model_config.vocab_size
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# suppress <|notimestamps|> which is handled by without_timestamps
lowerCamelCase__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.where((cur_len - self.begin_index) >= 1 , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _lowerCAmelCase , )
lowerCamelCase__ = jnp.where((cur_len - self.begin_index) < 2 , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _lowerCAmelCase , _lowerCAmelCase , )
return jnp.where(
_lowerCAmelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _lowerCAmelCase , )
lowerCamelCase__ = jax.vmap(_lowerCAmelCase )(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(cur_len == self.begin_index , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _lowerCAmelCase , )
lowerCamelCase__ = self.timestamp_begin + self.max_initial_timestamp_index
lowerCamelCase__ = jnp.where(
_lowerCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _lowerCAmelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowerCamelCase__ = jax.nn.log_softmax(_lowerCAmelCase , axis=-1 )
def handle_cumulative_probs(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowerCamelCase__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _lowerCAmelCase , )
lowerCamelCase__ = jax.vmap(_lowerCAmelCase )(_lowerCAmelCase , _lowerCAmelCase )
return scores
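# --- Usage sketch (added): chaining the processors ---
# Hedged composition example using the upstream export names
# (FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper);
# the list is called positionally with (input_ids, scores, cur_len), exactly
# as the __call__ above dispatches.
def _flax_logits_demo():
    import jax.numpy as jnp
    from transformers import (
        FlaxLogitsProcessorList,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
    )

    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)]
    )
    scores = jnp.zeros((2, 50))  # (batch_size, vocab_size)
    input_ids = jnp.ones((2, 4), dtype=jnp.int32)
    warped = processors(input_ids, scores, 4)  # cur_len = 4
    assert warped.shape == scores.shape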
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase_ ( __lowercase : int = "https://www.worldometers.info/coronavirus" ) -> dict:
'''simple docstring'''
_UpperCAmelCase = BeautifulSoup(requests.get(__lowercase ).text , "html.parser" )
_UpperCAmelCase = soup.findAll("h1" )
_UpperCAmelCase = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowercase , __lowercase )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(F"{key}\n{value}\n")
| 236
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ImageTextProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 66
| 0
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Pipeline that answers open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports {"image": ..., "question": ...} dicts, lists of them,
            # generators and datasets.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 499
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
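# Sketch of composing the three sub-configs; the text config is built from a
# registered model type via CONFIG_MAPPING ("opt" is the documented default).
def _example_compose_config():
    config = InstructBlipConfig.from_vision_qformer_text_configs(
        InstructBlipVisionConfig(),
        InstructBlipQFormerConfig(),
        CONFIG_MAPPING["opt"](),
        num_query_tokens=32,
    )
    return config.qformer_config.encoder_hidden_size  # follows vision hidden_size: 1408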
| 499
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
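# Hedged illustration of the round-trip the tests above exercise (the checkpoint
# name comes from the test itself and is downloaded on first use).
def _example_reformer_roundtrip():
    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    tokens = tok.tokenize("Hello World!")
    ids = tok.convert_tokens_to_ids(tokens)
    assert tok.convert_ids_to_tokens(ids) == tokens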
| 468
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = "▁"
__UpperCamelCase : Any = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCamelCase : int = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCamelCase : Dict = {
"facebook/xglm-564M": 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
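# Plain-Python illustration of the fairseq/spm alignment implemented above: spm ids
# shift up by fairseq_offset (1) so that ids 0..3 stay reserved for control tokens.
def _example_fairseq_alignment():
    fairseq_offset = 1
    unk_token_id = 3

    def spm_to_fairseq(spm_id: int) -> int:
        # spm id 0 is <unk>; every other piece shifts by the offset
        return spm_id + fairseq_offset if spm_id else unk_token_id

    assert spm_to_fairseq(3) == 4  # first "real" piece "," lands at fairseq position 4
    assert spm_to_fairseq(0) == 3  # unknown pieces map to the fairseq <unk> id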
| 468
| 1
|
from copy import deepcopy
class BinaryIndexedTree:
    """Fenwick tree over an integer array, supporting point updates and prefix sums."""

    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        """Build the tree from an existing array in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Recover the underlying array from the tree in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        """Add ``value`` to the element at ``index`` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set the element at ``index`` to ``value`` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Sum of the half-open prefix [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Sum of the half-open range [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Value of the element at ``index`` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Largest index whose prefix sum is less than or equal to ``value``, in O(log n)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
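# Quick demonstration of the tree's main operations on a small array; the expected
# values follow directly from the definitions above.
def _bit_example():
    bit = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
    assert bit.prefix(3) == 6        # arr[0] + arr[1] + arr[2]
    assert bit.query(1, 4) == 9      # half-open range: arr[1] + arr[2] + arr[3]
    bit.add(2, 10)                   # arr[2] += 10
    assert bit.get(2) == 13
    assert bit.get_array() == [1, 2, 13, 4, 5]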
| 708
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
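# Simplified stand-in (not the real _LazyModule) illustrating the pattern above:
# attributes resolve to real submodule imports only on first access.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        if name not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(submodule, name)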
| 516
| 0
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
group.add_argument("--wprec" , type=snake_case__ , default=8 , help="weight precision")
group.add_argument("--aprec" , type=snake_case__ , default=8 , help="activation precision")
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling")
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers")
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers")
group.add_argument("--quant-disable-keyword" , type=snake_case__ , nargs="+" , help="disable quantizers by keyword")
group.add_argument("--quant-disable-layer-module" , type=snake_case__ , help="disable quantizers by keyword under layer.")
group.add_argument("--quant-enable-layer-module" , type=snake_case__ , help="enable quantizers by keyword under layer")
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use")
group.add_argument("--percentile" , default=snake_case__ , type=snake_case__ , help="percentile for PercentileCalibrator")
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv")
group.add_argument("--clip-gelu" , metavar="N" , type=snake_case__ , help="clip gelu output maximum value to N")
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""

    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""

    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V projections share one GEMM."""

    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval, by adjusting the amax of the following input quantizer."""

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per-channel, where each channel is assigned the per-tensor amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""

    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""

    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
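# Hedged wiring sketch showing how the helpers above are typically combined from a
# training script; the argv values are illustrative only.
def _example_quant_setup():
    import argparse

    parser = argparse.ArgumentParser()
    add_arguments(parser)  # registers --wprec, --aprec, --calibrator, ...
    args = parser.parse_args(["--quant-per-tensor", "--calibrator", "max"])
    set_default_quantizers(args)  # must run before the quantized model is created
    return args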
| 659
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treats the curve as a collection of linear segments and sums the area of the
    trapezium shapes they form with the x axis."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
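# Sanity-check sketch for the routine above: for g(x) = x**3 + x**2 the unsigned
# area between curve and x axis on [-5, 5] is -∫_{-5}^{-1} g + ∫_{-1}^{5} g
# = 344/3 + 198 = 938/3 ≈ 312.667, which the approximation should approach.
def _check_trapezoidal_area():
    def g(x):
        return x**3 + x**2

    exact = 938 / 3
    approx = trapezoidal_area(g, -5, 5, 100_000)
    assert abs(approx - exact) < 1e-2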
| 659
| 1
|
'''simple docstring'''
__A : int = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 720
|
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight order,
    splitting the last item to fill the remaining capacity.

    >>> value = [1, 3, 5, 7, 9]
    >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    >>> fractional_knapsack(value, weight, 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
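# Worked example: ratios are value/weight = [2, 3, 4], so the greedy order is
# item 2, item 1, item 0; the first two fit whole and half of item 0 fills the rest.
def _knapsack_example():
    value, weight = [20, 30, 40], [10, 10, 10]
    max_value, fractions = fractional_knapsack(value, weight, capacity=25)
    assert fractions == [0.5, 1, 1]
    assert max_value == 80.0  # 40 + 30 + 0.5 * 20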
| 187
| 0
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_head_masking = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
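# Hedged inference sketch mirroring the integration test above; downloads the
# "microsoft/mpnet-base" checkpoint on first use.
def _example_mpnet_inference():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
    model = MPNetModel.from_pretrained("microsoft/mpnet-base")
    inputs = tokenizer("Hello world", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state.shape  # torch.Size([1, seq_len, 768])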
| 191
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCamelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli" , model_args , data_args )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(F'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["premise"] , examples["hypothesis"] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
    # Get the metric function
    metric = evaluate.load("xnli" )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix="predict" )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics["predict_samples"] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics("predict" , metrics )
        trainer.save_metrics("predict" , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , "predictions.txt" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , "w" ) as writer:
                writer.write("index\tprediction\n" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
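The `compute_metrics` hook in `main` reduces raw logits to class ids with `np.argmax` before scoring. A minimal, self-contained sketch of that contract, with illustrative names that are not part of the script:

import numpy as np

def sketch_compute_metrics(logits: "np.ndarray" , label_ids: "np.ndarray" ) -> dict:
    predictions = np.argmax(logits , axis=1 )  # highest-scoring class per row
    return {"accuracy": float((predictions == label_ids).mean() )}

assert sketch_compute_metrics(np.array([[0.1, 0.7, 0.2], [0.9, 0.05, 0.05]] ) , np.array([1, 0] ) ) == {"accuracy": 1.0}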
| 191
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__snake_case = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__snake_case = direct_transformers_import(PATH_TO_TRANSFORMERS)
__snake_case = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__snake_case = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__snake_case = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
UpperCamelCase = None
# source code of `config_class`
UpperCamelCase = inspect.getsource(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = _re_checkpoint.findall(SCREAMING_SNAKE_CASE_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("""/""" ):
UpperCamelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
UpperCamelCase = ckpt_name
break
return checkpoint
def _lowercase ( ):
"""simple docstring"""
UpperCamelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCamelCase = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase = """\n""".join(sorted(SCREAMING_SNAKE_CASE_ ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
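As a quick sanity check of the checkpoint-link regex above, the pattern round-trips on a toy docstring (the sample text is illustrative):

import re

_re_checkpoint = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
for ckpt_name, ckpt_link in _re_checkpoint.findall(doc ):
    assert ckpt_link.rstrip("/" ) == F'https://huggingface.co/{ckpt_name}'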
| 181
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """FlavaImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: Optional[ImageInput] = None , text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = False , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_image_mask: Optional[bool] = None , return_codebook_pixels: Optional[bool] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
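The `__call__` above ultimately dict-merges tokenizer output with image features. A toy, dependency-free analog of that merging logic (the stand-in values are illustrative, not the transformers API):

def toy_processor(text=None , images=None ):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images." )
    encoding = {}
    if text is not None:
        encoding.update({"input_ids": [[101, 102]]} )      # stand-in tokenizer output
    if images is not None:
        encoding.update({"pixel_values": [[0.0] * 4]} )    # stand-in image features
    return encoding

assert set(toy_processor(text="hi" , images=object() ) ) == {"input_ids", "pixel_values"}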
| 181
| 1
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
    def __init__( self , speech_model: WhisperForConditionalGeneration , speech_processor: WhisperProcessor , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ) -> None:
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                ' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ) -> None:
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> None:
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def __call__( self , audio , sampling_rate: int = 1_6_0_0_0 , height: int = 5_1_2 , width: int = 5_1_2 , num_inference_steps: int = 5_0 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='pt' , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=4_8_0_0_0_0 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    F''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.1_8_2_1_5 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
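The guidance step inside the denoising loop is plain tensor arithmetic: `guidance_scale` extrapolates from the unconditional prediction toward the text-conditioned one. A minimal sketch on dummy tensors:

import torch

noise_pred_uncond = torch.zeros(1 , 4 )
noise_pred_text = torch.ones(1 , 4 )
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(guided , torch.full((1, 4) , 7.5 ) )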
| 350
|
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack( list ):
    def __lt__( self , other ) -> bool:
        return self[-1] < other[-1]
    def __eq__( self , other ) -> bool:
        return self[-1] == other[-1]
def patience_sort( collection ) -> list:
    """simple docstring"""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
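A few plain asserts exercising the sort above; when the file is imported rather than run, only these execute:

assert patience_sort([1, 9, 5, 21, 17, 6] ) == [1, 5, 6, 9, 17, 21]
assert patience_sort([] ) == []
assert patience_sort([-3, -17, -48] ) == [-48, -17, -3]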
| 350
| 1
|
def check_cycle( graph ) -> bool:
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search( graph , vertex , visited , rec_stk ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
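Example adjacency maps for the checker above; `check_cycle` flags the back edge in the first graph only:

cyclic = {0: [1], 1: [2], 2: [0]}      # 0 -> 1 -> 2 -> 0
acyclic = {0: [1, 2], 1: [2], 2: []}   # a DAG
assert check_cycle(cyclic ) is True
assert check_cycle(acyclic ) is False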
| 709
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline( Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
        '''simple docstring'''
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ):
        '''simple docstring'''
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
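A hedged usage sketch: a class like the one above backs the "feature-extraction" pipeline task. The checkpoint name is illustrative, and running it downloads weights, so treat this as a sketch rather than a test:

from transformers import pipeline

extractor = pipeline("feature-extraction" , model="distilbert-base-uncased" )
features = extractor("This is a test." )
# nested list shaped [batch, tokens, hidden_size]
print(len(features[0] ) , len(features[0][0] ) )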
| 152
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 47
|
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : Any =parent
lowercase : Optional[int] =13
lowercase : Union[str, Any] =7
lowercase : str =30
lowercase : Optional[int] =self.seq_length + self.mem_len
lowercase : Dict =15
lowercase : List[str] =True
lowercase : Optional[int] =True
lowercase : Tuple =99
lowercase : str =[10, 50, 80]
lowercase : List[Any] =32
lowercase : Optional[int] =32
lowercase : int =4
lowercase : Any =8
lowercase : List[Any] =128
lowercase : List[str] =2
lowercase : Tuple =2
lowercase : int =None
lowercase : Optional[int] =1
lowercase : int =0
lowercase : List[str] =3
lowercase : str =self.vocab_size - 1
lowercase : Tuple =0.01
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_labels:
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ )
lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple()
lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ )
lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple()
lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple()
lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple()
lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs
lowercase : Union[str, Any] ={'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase_ = () if is_tf_available() else ()
lowerCamelCase_ = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =TFTransfoXLModelTester(self )
lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.model_tester.set_seed()
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.model_tester.set_seed()
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ )
    def lowerCamelCase_ ( self : Optional[Any] ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
| 92
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None ) -> list[int]:
    '''simple docstring'''
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None ) -> list[int]:
    '''simple docstring'''
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None ) -> list[int]:
    '''simple docstring'''
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None ) -> int:
    '''simple docstring'''
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None ) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None , level: int ) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root: Node | None , level: int ) -> Sequence[Node | None]:
    '''simple docstring'''
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root: Node | None ) -> Sequence[Node | None] | list[Any]:
    '''simple docstring'''
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    '''simple docstring'''
    root = make_tree()
    print(F'''In-order Traversal: {inorder(root )}''' )
    print(F'''Pre-order Traversal: {preorder(root )}''' )
    print(F'''Post-order Traversal: {postorder(root )}''' , "\n" )
    print(F'''Height of Tree: {height(root )}''' , "\n" )
    print("Complete Level Order Traversal: " )
    print(level_order(root ) , "\n" )
    print("Level-wise order Traversal: " )
    for level in range(1 , height(root ) + 1 ):
        print(F'''Level {level}:''' , get_nodes_from_left_to_right(root , level=level ) )
    print("\nZigZag order Traversal: " )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
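For the five-node tree built by make_tree(), the traversals resolve to fixed lists, which makes for compact checks:

tree = make_tree()
assert preorder(tree ) == [1, 2, 4, 5, 3]
assert inorder(tree ) == [4, 2, 5, 1, 3]
assert postorder(tree ) == [4, 5, 2, 3, 1]
assert level_order(tree ) == [1, 2, 3, 4, 5]
assert zigzag(tree ) == [[1], [3, 2], [4, 5]]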
| 223
|
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection , n ) -> None:
    '''simple docstring'''
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next(collection , index ) -> None:
    '''simple docstring'''
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
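A quick in-place check of the recursive sort above (it mutates its argument):

data = [5, 3, 1, 4, 2]
rec_insertion_sort(data , len(data ) )
assert data == [1, 2, 3, 4, 5]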
| 223
| 1
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ) -> Dict:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = """gelu"""
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _UpperCAmelCase ( self ) -> Tuple:
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A = ids_tensor([self.batch_size] , self.num_choices )
A = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=a__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[Any]:
A = TFRoFormerModel(config=a__ )
A = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A = [input_ids, input_mask]
A = model(a__ )
A = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[Any]:
A = True
A = TFRoFormerForCausalLM(config=a__ )
A = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A = model(a__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Union[str, Any]:
A = TFRoFormerForMaskedLM(config=a__ )
A = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Dict:
A = self.num_labels
A = TFRoFormerForSequenceClassification(config=a__ )
A = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Any:
A = self.num_choices
A = TFRoFormerForMultipleChoice(config=a__ )
A = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
A = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
A = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
A = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
A = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> str:
A = self.num_labels
A = TFRoFormerForTokenClassification(config=a__ )
A = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[int]:
A = TFRoFormerForQuestionAnswering(config=a__ )
A = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A = model(a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _UpperCAmelCase ( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class _UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _UpperCAmelCase ( self ) -> Any:
A = TFRoFormerModelTester(self )
A = ConfigTester(self , config_class=a__ , hidden_size=37 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _UpperCAmelCase ( self ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def _UpperCAmelCase ( self ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*a__ )
def _UpperCAmelCase ( self ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a__ )
def _UpperCAmelCase ( self ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
def _UpperCAmelCase ( self ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
@slow
def _UpperCAmelCase ( self ) -> str:
A = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(a__ )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
A = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(a__ )[0]
# TODO Replace vocab size
A = 5_0000
A = [1, 6, vocab_size]
self.assertEqual(output.shape , a__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
A = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-4 )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = 1E-4
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = tf.constant([[4, 10]] )
A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
A = emba(input_ids.shape )
A = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(a__ , a__ , atol=self.tolerance )
def _UpperCAmelCase ( self ) -> int:
A = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
A = emba.weight[:3, :5]
tf.debugging.assert_near(a__ , a__ , atol=self.tolerance )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = 1E-4
def _UpperCAmelCase ( self ) -> str:
# 2,12,16,64
        A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
A = embed_positions([2, 16, 768] )[None, None, :, :]
A , A = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
a__ , a__ , a__ )
A = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
A = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , a__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , a__ , atol=self.tolerance )
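# Editor's note (illustrative, not part of the original test file): rotary position
# embeddings rotate each (even, odd) feature pair by a position-dependent angle theta:
#
#     q'[2i]     = q[2i] * cos(theta) - q[2i + 1] * sin(theta)
#     q'[2i + 1] = q[2i] * sin(theta) + q[2i + 1] * cos(theta)
#
# The rotation is linear, so rotating key_layer == -query_layer yields exactly the
# negated rotated query values, which is why `expected_key_layer` above mirrors
# `expected_query_layer` with flipped signs.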
| 641
|
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
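# Editor's note (illustrative, not part of the original file): a pin table like the
# one above is typically consumed by looking up full requirement strings by package
# name when assembling setup metadata, e.g. (hypothetical):
#
#     install_requires = [deps["torch"], deps["transformers"]]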
| 641
| 1
|
def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of a modulo m, via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
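# Editor's note (illustrative, not part of the original file): a quick sanity check.
# 7 * 8 = 56 = 5 * 11 + 1, so the inverse of 7 modulo 11 is 8.
if __name__ == "__main__":
    assert mod_inverse(7, 11) == 8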
| 712
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
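# Editor's note (illustrative, not part of the original test file): a minimal sketch
# of end-user inference with this pipeline, assuming a CUDA device is available:
#
#     pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
#     frames = pipe("Spiderman is surfing", num_inference_steps=25).frames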
| 245
| 0
|
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]

        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase ={'''input_ids''': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase,  # assigned in the fmt: off block above
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
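# Editor's note (illustrative, not part of the original test file): the behaviour
# exercised above corresponds to multilingual usage along the lines of:
#
#     tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#     tokenizer.tgt_lang = "fr"  # prepends the French language code to every encoding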
| 72
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 72
| 1
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
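    # Usage for summarization (illustrative, mirroring the MT example above):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization $@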
run_generate(verbose=True)
| 721
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
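# Editor's note (illustrative, not part of the original file): constructing the config
# with defaults and overriding a single field:
#
#     config = VisualBertConfig(visual_embedding_dim=1024)
#     assert config.hidden_size == 768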
| 622
| 0
|